def detect(self):
    """
    Interactively detect the USB card writer.

    Takes an ``lsusb`` snapshot with the reader removed and another with
    it plugged in; the set difference identifies the writer. Exits the
    program if the user aborts a prompt or no new device shows up.

    :return: None; prints the detected device line(s)
    """
    banner("Detecting USB Card Reader")

    print("Make sure the USB Reader is removed ...")
    if not yn_choice("Is the reader removed?"):
        sys.exit()
    usb_out = set(Shell.execute("lsusb").splitlines())

    print("Now plug in the Reader ...")
    if not yn_choice("Is the reader plugged in?"):
        sys.exit()
    usb_in = set(Shell.execute("lsusb").splitlines())

    # devices present now but not before must be the writer
    writer = usb_in - usb_out

    if not writer:
        # BUG FIX: typo "devise" -> "device" in the user-facing message
        print(
            "ERROR: we did not detect the device, make sure it is plugged."
        )
        sys.exit()
    else:
        banner("Detected Card Writer")
        print("\n".join(writer))
        print()
def prepare_sdcard():
    """
    Ensure an SD card is detected and all its partitions are unmounted.

    Re-loads the reader tray, waits for the kernel to settle, then
    unmounts every mounted partition that belongs to the target device.
    If the card is not visible the user may re-insert the reader and the
    check is retried recursively.

    :return: True if prepared
    :rtype: bool
    """
    # re-load the reader tray so a freshly inserted card is recognized
    # Console.ok(f'sudo eject -t {device}')
    os.system(f'sudo eject -t {device}')
    time.sleep(3)

    basename = os.path.basename(device)
    listing = Shell.run('lsblk')

    if basename not in listing.split():
        Console.error("SD Card not detected. Please reinsert "
                      "card reader. ")
        if not yn_choice("Card reader re-inserted? No to cancel "
                         "operation"):
            return False
        time.sleep(3)
        return prepare_sdcard()

    # unmount every mounted partition that belongs to the device;
    # column 7 of lsblk (index 6) is the mountpoint when present
    for row in listing.splitlines():
        fields = row.split()
        if basename in fields[0] and len(fields) > 6:
            Console.ok(f'sudo umount {fields[6]}')
            os.system(f'sudo umount {fields[6]}')
    return True
def _install_on_osx(self):
    """
    Install multipass version 1.0.0 on macOS.

    see https://multipass.run/docs/installing-on-macos

    Downloads the official pkg with curl and opens the installer. When
    ``self.dryrun`` is set the commands are only printed.

    :return: "" when sudo rights appear to be missing (and not a dry run)
    """
    # test if you are in sudo, if not
    result = Shell.run("sudo -v")
    # NOTE(review): this matches a hard-coded user name ("grey"), so it
    # will not detect missing sudo rights for other users -- TODO generalize
    if "Sorry, user grey may not run sudo" in result:
        Console.error("this program must be run as sudo")
        if not self.dryrun:
            return ""

    # download
    url = "https://github.com/canonical/multipass/releases/download/v1.0.0/multipass-1.0.0+mac-Darwin.pkg"
    pkg = "multipass-1.0.0+mac-Darwin.pkg"

    # install
    try:
        if self.dryrun:
            Console.ok("Dryrun:")
            Console.ok("")
            Console.ok(f"curl {url} --output {pkg}")
            Console.ok(f"open {pkg}")
        else:
            Shell.run(f"curl {url} --output {pkg}")
            os.system(f"open {pkg}")
    # BUG FIX: bare "except:" also swallowed SystemExit/KeyboardInterrupt
    except Exception:
        Console.error("problem downloading multipass")

    # remove
    if not self.dryrun and \
            yn_choice("do you want to delete the downloaded file?"):
        Shell.rm(f"{pkg}")
def configure_wifi(self, ssid, psk, interactive=False):
    """
    Write a wpa_supplicant configuration for a PSK-protected network.

    Only works for psk based wifi.

    :param ssid: the ssid
    :param psk: the psk
    :param interactive: when True, ask the user before writing
    :return:
    """
    template = textwrap.dedent("""\
    ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
    update_config=1
    country=US
    network={{
            ssid=\"{network}\"
            psk=\"{pwd}\"
            key_mgmt=WPA-PSK
    }}""")
    wifi = template.format(network=ssid, pwd=psk)
    print(wifi)

    path = "/etc/wpa_supplicant/wpa_supplicant.conf"

    if self.dryrun:
        print("DRY RUN - skipping:")
        print("Writing wifi ssid:{} psk:{} to {}".format(ssid, psk, path))
        return
    if interactive and not yn_choice("About write wifi info. Please confirm:"):
        return

    pathlib.Path(self.filename(path)).write_text(wifi)
def _continue(msg=""):
    """
    Interactive checkpoint: ask the user (twice) whether to continue.

    Does nothing when the global ``step`` flag is falsy. Otherwise the
    user is prompted; declining both prompts aborts the workflow.

    :param msg: label shown in the banner and the prompts
    :raises RuntimeError: if the user declines both prompts
    """
    global step
    if not step:
        return
    kubeman.banner(msg)
    print(screen.columns * "-")
    print()
    if yn_choice(f"CONTINUE: {msg}?"):
        return
    # BUG FIX: typo "I ask yo" -> "I ask you" in the user-facing prompt
    if yn_choice(f"I ask you a final time! CONTINUE: {msg}?"):
        return
    kubeman.hline()
    print()
    raise RuntimeError("Workflow interrupted")
    # NOTE(review): the original had two trailing print() separator calls
    # after the raise; they were unreachable and have been removed.
def write_key(self, key=None, path=None, mode="wb", force=False):
    """
    Write key material to path, creating directories as needed.

    @param key: The data being written (the key content)
    @param path: Full path including file name
    @param mode: The mode for writing to the file
    @param force: Automatically overwrite file if it exists
    """
    # Check if the key is empty
    if key is None:
        Console.error("Key is empty")
        sys.exit()

    if path is None:
        Console.error("Path is empty")
        sys.exit()

    # Create directories as needed for the key.
    # BUG FIX: os.path.dirname returns "" for a bare filename; guard so
    # we never call Shell.mkdir("").
    dirs = os.path.dirname(path)
    if dirs and not os.path.exists(dirs):
        Shell.mkdir(dirs)

    if not force:
        # Check if file exists at location and ask before clobbering
        if os.path.exists(path):
            Console.info(f"{path} already exists")
            if not yn_choice(message=f"overwrite {path}?", default="N"):
                Console.info(f"Not overwriting {path}. Quitting")
                sys.exit()

    # Write the file
    writefd(filename=path, content=key, mode=mode)
def check_exists(path):
    """
    Warn when *path* already exists and let the user decide.

    If the path exists and the user declines to overwrite, the program
    terminates; otherwise the function simply returns.
    """
    if not os.path.exists(path):
        return
    Console.info(f"{path} already exists")
    overwrite = yn_choice(message=f"overwrite {path}?", default="N")
    if not overwrite:
        Console.info(f"Not overwriting {path}. Quitting")
        sys.exit()
def configure_wifi(self, ssid, psk=None, mp='/mount/pi', interactive=False):
    """
    Write a wpa_supplicant configuration onto the mounted SD card.

    Produces a psk-protected entry when *psk* is given, otherwise an
    open network entry (key_mgmt=NONE).

    :param ssid: the ssid
    :param psk: the psk; None selects an open network
    :param mp: the mount point of the SD card
    :param interactive: when True, ask the user before writing
    :return:
    """
    if psk is None:
        wifi = textwrap.dedent("""\
        ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
        update_config=1
        country=US
        network={{
                ssid=\"{network}\"
                key_mgmt=NONE
        }}""").format(network=ssid)
    else:
        wifi = textwrap.dedent("""\
        ctrl_interface=DIR=/var/run/wpa_supplicant GROUP=netdev
        update_config=1
        country=US
        network={{
                ssid=\"{network}\"
                psk=\"{pwd}\"
                key_mgmt=WPA-PSK
        }}""").format(network=ssid, pwd=psk)

    # Per fix provided by Gregor, we use this path to get around rfkill
    # block on boot
    # path = f"{mp}/etc/wpa_supplicant/wpa_supplicant.conf"
    target = f"{mp}/boot/wpa_supplicant.conf"

    if self.dryrun:
        print("DRY RUN - skipping:")
        print("Writing wifi ssid:{} psk:{} to {}".format(ssid, psk, target))
        return
    if interactive and not yn_choice("About write wifi info. Please confirm:"):
        return

    # pathlib.Path(self.filename(path)).write_text(wifi)
    with open(target, 'w') as handle:
        handle.write(wifi)
def format_device(self, device='dev/sdX', unmount=True, yes=False, verbose=True):
    """
    Formats device with one FAT32 partition

    WARNING: make sure you have the right device, this command could
    potentially erase your OS

    :param device: The device on which we format
    :type device: str
    :param unmount: unmount (but do not eject) the device after formatting
    :type unmount: bool
    :param yes: on macOS, skip the confirmation prompt
    :type yes: bool
    :param verbose: print a banner instead of a plain line
    :type verbose: bool
    :return: True on success, False if the card could not be prepared or
        the device is not usable
    :rtype: bool
    """
    # volume label given to the freshly formatted FAT32 partition
    _title = "UNTITLED"

    def prepare_sdcard():
        """
        ensures a card is detected and unmounted

        :return: True if prepared
        :rtype: bool
        """
        # re-load the reader tray so a freshly inserted card is seen
        # Console.ok(f'sudo eject -t {device}')
        os.system(f'sudo eject -t {device}')
        time.sleep(3)
        device_basename = os.path.basename(device)
        result = Shell.run('lsblk')
        if device_basename in result.split():
            # unmount every mounted partition of the device; index 6 of
            # an lsblk row is the mountpoint column when present
            for line in result.splitlines():
                line = line.split()
                if device_basename in line[0] and len(line) > 6:
                    Console.ok(f'sudo umount {line[6]}')
                    os.system(f'sudo umount {line[6]}')
            return True
        else:
            # card invisible: let the user re-seat the reader and retry
            Console.error("SD Card not detected. Please reinsert "
                          "card reader. ")
            if not yn_choice("Card reader re-inserted? No to cancel "
                             "operation"):
                return False
            else:
                time.sleep(3)
                return prepare_sdcard()

    # cache sudo credentials up front so the shell commands do not prompt
    Sudo.password()
    if os_is_linux() or os_is_pi():
        if verbose:
            banner(f"format {device}")
        else:
            print(f"format {device}")
        if not prepare_sdcard():
            return False
        # TODO Gregor verify commenting out the below is ok
        # if os_is_mac():
        #     self.mount(device=device)
        user = os.environ.get('USER')
        # msdos label + single FAT32 partition spanning the whole card
        script = textwrap.dedent(f"""
            ls /media/{user}
            sudo parted {device} --script -- mklabel msdos
            sudo parted {device} --script -- mkpart primary fat32 1MiB 100%
            sudo mkfs.vfat -n {_title} -F32 {device}1
            sudo parted {device} --script print""").strip().splitlines()
        for line in script:
            _execute(line, line)
        os.system("sudo sync")
        if unmount:
            self.unmount(device=device)
            # without dev we unmount but do not eject. If
            # we completely eject, burn will fail to detect the device.
        os.system("sudo sync")
        Console.ok("Formatted SD Card")
    elif os_is_mac():
        details = USB.get_dev_from_diskutil()
        # checking if string contains list element
        valid = any(entry in device for entry in details)
        if not valid:
            Console.error(
                f"this device can not be used for formatting: {device}")
            return False
        elif len(details) > 1:
            # refuse to format when several writers are attached
            Console.error(
                "For security reasons, please only put one USB writer in")
            Console.msg(f"we found {details}")
            return False
        else:
            details = USB.get_from_diskutil()
            USB.print_details(details)
            print()
            if yes or yn_choice(
                    f"\nDo you like to format {device} as {_title}"):
                _execute(
                    f"Formatting {device} as {_title}",
                    f"sudo diskutil eraseDisk FAT32 {_title} MBRFormat {device}"
                )
    else:
        raise NotImplementedError("Not implemented for this OS")
    return True
def burn_all(self, image="latest", device=None, blocksize="4M", progress=True, hostnames=None, ips=None, key=None, password=None, ssid=None, psk=None, formatSD=False): """ :param image: :param device: :param blocksize: :param progress: :param hostnames: :param ips: :param key: :return: """ #:param devices: string with device letters # # define the dev # devices = {} # dict of {device_name: empty_status} # # probe the dev # #pprint(Burner().info()) info_statuses = Burner().info() # If the user specifies a particular device, we only care about that device if device is not None: for dev in device: devices[dev] = info_statuses[dev]['empty'] info_statuses = {} for device in info_statuses.keys(): #print("call the info command on the device and " # "figure out if an empty card is in it") # change the status based on what you found devices[device] = info_statuses[device]['empty'] # if we detect a non empty card we interrupt and tell # which is not empty. # (print out status of the devices in a table) device_statuses = devices.values() if False in device_statuses: print("\nEmpty status of devices:") for dev, empty_status in devices.items(): x = "" if empty_status else "not " print(f"Device {dev} is {x}empty") print() # detect if there is an issue with the cards, readers # TODO what exactly should be done here? 
# ask if this is ok to burn otherwise burn_all = yn_choice("Burn non-empty devices too?") # if yes burn all of them for which we have status "empty card" if not burn_all: # delete from devices dict any non-empty devices devices_to_delete = [] for device in devices.keys(): if devices[device] == False: devices_to_delete.append( device) # can't delete while iterating for device in devices_to_delete: del devices[device] print("Burning these devices:") print(' '.join(devices.keys())) keys = list(devices.keys()) for i in range(len(hostnames)): #for device, status in devices.items(): device = keys[i % len( keys )] # We might be using one device slot to burn multiple cards status = devices[device] hostname = hostnames[i] ip = None if not ips else ips[i] self.burn(image, device, blocksize, progress, hostname, ip, key, password, ssid, psk, formatSD) os.system('tput bel') # ring the terminal bell to notify user if i < len(hostnames) - 1: if (i + 1) != ((i + 1) % len(keys)): choice = input( f"Slot {keys[(i + 1) % len(keys)]} needs to be reused. Do you wish to continue? [y/n] " ) while (choice != 'y') and (choice != 'n'): choice = input("Please use [y/n] ") if choice == 'n': break input('Insert next card and press enter...') print('Burning next card...') print() i += 1 print(f"You burned {i} SD Cards") print("Done :)")
def activate_ssh(self, public_key, debug=False, interactive=False):
    """
    sets the public key path and copies it to the SD card

    Creates the boot-partition ``ssh`` marker file, installs the key in
    ``/home/pi/.ssh/authorized_keys``, and patches ``/etc/rc.local`` so
    permissions are fixed on first boot.

    :param public_key: the public key location
    :param debug: print the key path
    :param interactive: ask before writing the ssh configuration
    :return: None
    """
    #
    # this has bugs as we have not yet thought about debug, interactive, yesno
    # yesno we can take form cloudmesh.common
    #
    # raise NotImplementedError

    # set the keypath
    self.keypath = public_key
    if debug:
        print(self.keypath)
    if not os.path.isfile(self.keypath):
        ERROR("key does not exist", self.keypath)
        sys.exit()

    if self.dryrun:
        print("DRY RUN - skipping:")
        print("Activate ssh authorized_keys pkey:{}".format(public_key))
        return
    elif interactive:
        if not yn_choice("About to write ssh config. Please confirm:"):
            return

    # activate ssh by creating an empty ssh file in the boot drive
    pathlib.Path(self.filename("/ssh")).touch()

    # Write the content of the ssh rsa to the authorized_keys file
    key = pathlib.Path(public_key).read_text()
    ssh_dir = self.filename("/home/pi/.ssh")
    print(ssh_dir)
    if not os.path.isdir(ssh_dir):
        os.makedirs(ssh_dir)
    auth_keys = ssh_dir / "authorized_keys"
    auth_keys.write_text(key)

    # We need to fix the permissions on the .ssh folder but it is hard to
    # get this working from a host OS because the host OS must have a user
    # and group with the same pid and gid as the raspberry pi OS. On the PI
    # the pi uid and gid are both 1000.

    # All of the following do not work on OS X:
    # execute("chown 1000:1000 {ssh_dir}".format(ssh_dir=ssh_dir))
    # shutil.chown(ssh_dir, user=1000, group=1000)
    # shutil.chown(ssh_dir, user=1000, group=1000)
    # execute("sudo chown 1000:1000 {ssh_dir}".format(ssh_dir=ssh_dir))

    # Changing the modification attributes does work, but we can just handle
    # this the same way as the previous chown issue for consistency.
    # os.chmod(ssh_dir, 0o700)
    # os.chmod(auth_keys, 0o600)

    # /etc/rc.local runs at boot with root permissions - since the file
    # already exists modifying it shouldn't change ownership or permissions
    # so it should run correctly. One lingering question is: should we clean
    # this up later?

    new_lines = textwrap.dedent('''
        # FIX298-START: Fix permissions for .ssh directory
        if [ -d "/home/pi/.ssh" ]; then
            chown pi:pi /home/pi/.ssh
            chmod 700 /home/pi/.ssh
            if [ -f "/home/pi/.ssh/authorized_keys" ]; then
                chown pi:pi /home/pi/.ssh/authorized_keys
                chmod 600 /home/pi/.ssh/authorized_keys
            fi
        fi
        # FIX298-END
        ''')
    rc_local = self.filename("/etc/rc.local")
    new_rc_local = ""
    already_updated = False
    with rc_local.open() as f:
        for line in f:
            # a previous run already inserted the fix; leave the file alone
            if "FIX298" in line:
                already_updated = True
                break
            # splice the permission fix just before the final "exit 0"
            if line == "exit 0\n":
                new_rc_local += new_lines
                new_rc_local += line
            else:
                new_rc_local += line
    if not already_updated:
        with rc_local.open("w") as f:
            f.write(new_rc_local)
    self.disable_password_ssh()
def is_note_book_done_yn():
    """
    Ask the user to confirm that the jupyter notebook has completed.

    :return: True if the user confirms completion
    :rtype: bool
    """
    # BUG FIX: the original discarded the user's answer; return it so
    # callers can act on the confirmation.
    return yn_choice(
        "Please run the jupyter notebook now and continue after it completed")
def do_host(self, args, arguments):
    """
    ::

        Usage:
            host scp NAMES SOURCE DESTINATION [--dryrun]
            host ssh NAMES COMMAND [--dryrun] [--output=FORMAT]
            host config NAMES --ips=IPS [--user=USER] [--key=PUBLIC]
            host config --proxy=PROXY NAMES [--user=USER] [--append] [--local=no] [--StrictHostKeyChecking=no] [--cluster=name]
            host config NAMES [--user=USER] [--append] [--local=no] [--StrictHostKeyChecking=no] [--cluster=name]
            host find [NAMES] [--user=USER] [--table|--json] [--verbose]
            host check NAMES [--user=USER] [--key=PUBLIC]
            host key create NAMES [--user=USER] [--dryrun] [--output=FORMAT]
            host key list NAMES [--output=FORMAT]
            host key setup NAMES
            host key gather NAMES [--authorized_keys] [FILE]
            host key scatter NAMES [FILE] [--user=USER]
            host key add NAMES [FILE]
            host key delete NAMES [FILE]
            host key access NAMES [FILE] [--user=USER]
            host tunnel create NAMES [--port=PORT]
            host mac NAMES [--eth] [--wlan] [--output=FORMAT]
            host setup WORKERS [LAPTOP]
            host shutdown NAMES
            host reboot NAMES
            host adduser NAMES USER
            host passwd NAMES USER
            host addsudo NAMES USER
            host deluser NAMES USER
            host ping NAMES
            host info NAMES

        This command does some useful things.

        Arguments:
            FILE   a file name

        Options:
            --dryrun   shows what would be done but does not execute
            --output=FORMAT  the format of the output
            --port=PORT    starting local port for tunnel assignment
            --local=no   do not append .local to manager hostname [default: yes]
            --user=USER  username for manager and workers [default: pi]
            --ips=IPS    ip addresses of the manager and workers
            --StrictHostKeyChecking=no  if set to yes, strict host checking is enforced [default: no]
            --ProxyJump=no  if set to yes, a proxyjump is performed for each worker through the manager [default: yes]

        Description:

            host scp NAMES SOURCE DESTINATION

                Uses scp to transfer Source to NAMES:DESTINATION.

            host ssh NAMES COMMAND

                runs the command on all specified hosts
                Example:
                     ssh red[01-10] \"uname -a\"

            host key create NAMES
                create a ~/.ssh/id_rsa and id_rsa.pub on all hosts specified
                Example:
                    ssh key create "red[01-10]"

            host key list NAMES

                list all id_rsa.pub keys from all hosts specified
                 Example:
                     ssh key list red[01-10]

            host key gather HOSTS FILE

                gathers all keys from file FILE including the one from
                localhost.

                    ssh key gather "red[01-10]" keys.txt

            host key scatter HOSTS FILE [--user=USER]

                copies all keys from file FILE to authorized_keys on all
                hosts, but also makes sure that the users ~/.ssh/id_rsa.pub
                key is in the file.

                If provided the optional user, it will add the keys to
                that user's .ssh directory. This is often required when
                adding a new user in which case HOSTS should still be a
                sudo user with ssh currently enabled.

                1) adds ~/.id_rsa.pub to the FILE only if its not already in it
                2) removes all duplicated keys

                Example:
                    ssh key scatter "red[01-10]"
                    ssh key scatter pi@red[01-10] keys.txt --user=alice

            host key add NAMES FILE

                Adds all keys in FILE into the authorized_keys of NAMES.

                Example:
                    cms host key add worker001 ~/.ssh/id_rsa.pub

            host key delete NAMES FILE

                Deletes all keys in FILE from authorized_keys of NAMES if
                they exist.

                Example
                    cms host key delete worker001 ~/.ssh/id_rsa.pub

            host key scp NAMES FILE

                copies all keys from file FILE to authorized_keys on all
                hosts but also makes sure that the users ~/.ssh/id_rsa.pub
                key is in the file and removes duplicates, e.g. it calls
                fix before upload

                Example:
                    ssh key list red[01-10] > pubkeys.txt
                    ssh key scp red[01-10] pubkeys.txt

            host config NAMES IPS [--user=USER] [--key=PUBLIC]

                generates an ssh config file template that can be added
                to your .ssh/config file

                Example:
                    cms host config "red,red[01-03]" "198.168.1.[1-4]" --user=pi

            host check NAMES [--user=USER] [--key=PUBLIC]

                This command is used to test if you can login to the
                specified hosts. It executes the hostname command and
                compares it. It provides a table with a success column

                cms host check "red,red[01-03]"

                    +-------+---------+--------+
                    | host  | success | stdout |
                    +-------+---------+--------+
                    | red   | True    | red    |
                    | red01 | True    | red01  |
                    | red02 | True    | red02  |
                    | red03 | True    | red03  |
                    +-------+---------+--------+

            host tunnel create NAMES [--port=PORT]

                This command is used to create a persistent local port
                forward on the host to permit ssh tunnelling from the
                wlan to the physical network (eth). This registers an
                autossh service in systemd with the default port starting
                at 8001.

                Example:
                    cms host tunnel create red00[1-3]

            host mac NAMES

                returns the list of mac addresses of the named pis.

            host setup WORKERS [LAPTOP]

                Executes the following steps

                    cms bridge create --interface='wlan0'
                    cms host key create red00[1-3]
                    cms host key gather red00[1-3],pi@laptop keys.txt
                    cms host key scatter red00[1-3],localhost keys.txt
                    rm keys.txt
                    cms host tunnel create red00[1-3]

            host shutdown NAMES

                Shuts down NAMES with `sudo shutdown -h now`. If localhost
                in names, it is shutdown last.

            host reboot NAMES

                Reboots NAMES with `sudo reboot`. If localhost in names,
                it is rebooted last.

            host adduser NAMES USER

                Adds a user with user name USER to the hosts identified
                by NAMES. Password is disabled, see host passwd to enable.

            host addsudo NAMES USER

                Adds sudo rights to USER at NAMES

            host passwd NAMES USER

                Changes the password for USER at NAMES

            host deluser NAMES USER

                Deleted USER from NAMES. Home directory will be removed.

            host config proxy PROXY NAMES

                This adds to your ~/.ssh/config file a ProxyJump
                configuration to reach NAMES via PROXY. This is useful
                when the PROXY is acting as a network bridge for NAMES to
                your current device.

                Example:
                    cms host config proxy pi@red red00[1-2]
    """

    def _print(results):
        # default tabular output; fall back to pprint for other formats
        arguments.output = arguments.output or 'table'
        if arguments.output in ['table', 'yaml']:
            print(
                Printer.write(results,
                              order=['host', 'success', 'stdout', 'stderr'],
                              output=arguments.output))
        else:
            pprint(results)

    def _print_pis(results):
        # tabular output for the pi discovery records
        arguments.output = arguments.output or 'table'
        if arguments.output in ['table', 'yaml']:
            print(
                Printer.write(results,
                              order=[
                                  'name', 'ip', 'user', 'os', 'mac',
                                  'model', 'memory', 'serial', ".local"
                              ],
                              output=arguments.output))
            # not printed "revision"
            # not printed "hardware"
        else:
            pprint(results)

    def get_filename(filename, hosts):
        # default key file name derived from the first host name
        if filename is not None:
            return filename
        if type(hosts) == str:
            hosts = Parameter.expand(hosts)
        label = hosts[0]
        return path_expand(f"~/.ssh/cluster_keys_{label}")

    # BUG FIX: a comma was missing between 'wlan' and 'dryrun', which
    # silently concatenated them into the parameter name 'wlandryrun'
    map_parameters(arguments, 'eth', 'wlan', 'dryrun', 'output', 'user',
                   'port', 'append', 'StrictHostKeyChecking', 'local',
                   'proxy', 'ips', 'cluster')

    dryrun = arguments.dryrun

    # VERBOSE(arguments)

    if dryrun:
        VERBOSE(arguments)

    if arguments.info:
        names = Parameter.expand(arguments.names)

        # check if .local

        # check if mesh network

        # check if static network
        # use arp - a di find hosts ips

        # if linux
        # dig +short -x 192.168.50.1

        Console.error("Not yet Implemented")

    elif arguments.find:

        verbose = arguments["--verbose"]
        names = Parameter.expand(arguments.NAMES)

        # temporary so we can easy modify while not needing to update
        # cloudmesh.common
        from cloudmesh.host.network import PiNetwork

        network = PiNetwork()
        pis = network.find_pis(user=arguments.user, verbose=verbose)

        if arguments["--json"]:
            print(pis)
        else:
            _print_pis(pis)

    elif arguments.mac:

        names = Parameter.expand(arguments.NAMES)
        # with neither flag given, report both interfaces
        if not arguments.eth and not arguments.wlan:
            arguments.eth = True
            arguments.wlan = True

        eth = 'cat /sys/class/net/eth0/address'
        wlan = 'cat /sys/class/net/wlan0/address'

        if arguments.eth:
            results = Host.ssh(hosts=names,
                               command=eth,
                               username=arguments.user)
            print("eth0:")
            _print(results)

        if arguments.wlan:
            results = Host.ssh(hosts=names,
                               command=wlan,
                               username=arguments.user)
            print("wlan0:")
            _print(results)

    elif arguments.setup:

        HostCreate.setup(workers=arguments.WORKERS,
                         laptop=arguments.LAPTOP)

    elif arguments.scp and not arguments.key:

        result = Host.put(hosts=arguments.NAMES,
                          source=arguments.SOURCE,
                          destination=arguments.DESTINATION)
        _print(result)

    elif arguments.ping:

        names = Parameter.expand(arguments.NAMES)
        results = Host.ping(hosts=names)
        _print(results)

    elif arguments.ssh:

        names = Parameter.expand(arguments.NAMES)
        results = Host.ssh(hosts=names, command=arguments.COMMAND)
        _print(results)

    elif arguments.key and arguments.create:

        results = Host.ssh_keygen(hosts=arguments.NAMES,
                                  username=arguments.user,
                                  dryrun=dryrun)
        _print(results)

    elif arguments.key and arguments.list:

        names = Parameter.expand(arguments.NAMES)
        results = Host.ssh(hosts=names,
                           command='cat .ssh/id_rsa.pub',
                           username=arguments.user)
        _print(results)

    elif arguments.key and arguments.add:

        # BUG FIX: get_filename takes (filename, hosts); the FILE argument
        # was previously not passed, so NAMES was misused as the filename
        filename = get_filename(arguments.FILE, arguments.NAMES)

        if not os.path.isfile(filename):
            Console.error(f"Cannot find file {filename}")
            return

        # Copy to temp location
        Host.put(hosts=arguments.NAMES,
                 source=filename,
                 destination="~/.ssh/key.tmp")

        # Execute append command and remove command
        command = 'cat ~/.ssh/key.tmp >> ~/.ssh/authorized_keys && rm ~/.ssh/key.tmp'
        Host.ssh(hosts=arguments.NAMES, command=command)

    elif arguments.key and arguments.delete:
        Console.ok("key delete")

        filename = get_filename(arguments.FILE, arguments.NAMES)

        if not os.path.isfile(filename):
            Console.error(f"Cannot find file {filename}")
            return

        # Copy to temp location
        remote_temp = "~/.ssh/key.tmp"
        Host.put(hosts=arguments.NAMES,
                 source=filename,
                 destination=remote_temp)

        # grep can read multiple patterns from a file, one per line.
        # Combine with the options -v to output non-matching lines, and
        # -F to match strings instead of regex and -x to require that the
        # whole line matches.
        command = f"""grep -Fvx -f {remote_temp} ~/.ssh/authorized_keys >remaining_keys && \
mv remaining_keys ~/.ssh/authorized_keys && \
rm {remote_temp}"""
        Host.ssh(hosts=arguments.NAMES, command=command)
        Console.ok(f"Delete keys from {filename} on {arguments.NAMES}")

    elif arguments.key and arguments.setup:

        label = Parameter.expand(arguments.NAMES)[0]
        filename = get_filename(arguments.FILE, arguments.NAMES)

        directory = os.path.dirname(filename)
        if directory:
            Shell.mkdir(directory)

        output = Host.gather_keys(username=arguments.user,
                                  hosts=arguments.NAMES,
                                  filename="~/.ssh/id_rsa.pub",
                                  key="~/.ssh/id_rsa",
                                  processors=3,
                                  dryrun=False)
        with open(filename, "w") as f:
            f.write(output)

        # scatter
        # place .ssh/config a strict host check to no

    elif arguments.key and arguments.gather:

        output = Host.gather_keys(username=arguments.user,
                                  hosts=arguments.NAMES,
                                  filename="~/.ssh/id_rsa.pub",
                                  key="~/.ssh/id_rsa",
                                  processors=3,
                                  dryrun=False)

        VERBOSE(arguments)
        filename = get_filename(arguments.FILE, arguments.NAMES)

        print(output)
        banner(f"Writing Keys to file {filename}")

        directory = os.path.dirname(filename)
        print('command directory', directory)
        if directory:
            Shell.mkdir(directory)

        if os.path.isfile(filename) and yn_choice(
                f'{filename} is not empty. Do you wish to overwrite it? '
                f'(If no you will append).'):
            with open(filename, "w") as f:
                f.write(output)
        else:
            with open(filename, "a") as f:
                f.write(output)

    elif arguments.key and arguments.scatter:

        #
        # this should be a function in Host
        #
        filename = get_filename(arguments.FILE, arguments.NAMES)
        names = arguments.NAMES
        user = arguments.user

        if not os.path.isfile(filename):
            Console.error("The file does not exist")
            return ""

        if not user:
            result = Host.put(hosts=names,
                              source=filename,
                              destination=".ssh/authorized_keys")
            _print(result)
        else:
            # install the keys into another user's .ssh directory, which
            # needs sudo and a staged copy via the login user's home
            Console.info('SCP to ./temp_authorzied_keys_temp')
            result = Host.put(hosts=names,
                              source=filename,
                              destination="temp_authorized_keys_temp")
            _print(result)

            Console.info(f'Mkdir /home/{user}/.ssh if not exist')
            command = f'sudo mkdir -p /home/' \
                      f'{user}/.ssh/'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

            Console.info(f'Chown /home/{user}/.ssh to {user}')
            command = f'sudo chown {user}:{user} /home/' \
                      f'{user}/.ssh/'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

            Console.info(f'Chmod /home/{user}/.ssh to 700')
            command = f'sudo chmod 700 /home/' \
                      f'{user}/.ssh/'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

            Console.info(f'Mv temp_authorized_keys_temp to /home/'
                         f'{user}/.ssh/authorized_keys')
            command = f'sudo mv temp_authorized_keys_temp /home/' \
                      f'{user}/.ssh/authorized_keys'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

            Console.info(f'Chown /home/{user}/.ssh/authorized_keys to '
                         f'{user}')
            command = f'sudo chown {user}:{user} /home/' \
                      f'{user}/.ssh/authorized_keys'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

    elif arguments.key and arguments.access:

        #
        # this should be a function in Host
        #
        names = arguments.NAMES
        user = arguments.user
        filename = arguments.FILE

        temp = path_expand("~/.cloudmesh/temp_config")

        if filename:
            config = readfile(filename)
        else:
            # minimal config that disables strict host key checking
            config = textwrap.dedent("""
                Host *
                    StrictHostKeyChecking no
            """).strip()

        writefile(temp, config)

        if not os.path.isfile(temp):
            Console.error("The file does not exist")
            return ""

        if not user:
            result = Host.put(hosts=names,
                              source=temp,
                              destination=".ssh/config")
            _print(result)
        else:
            Console.info(f'Mkdir /home/{user}/.ssh if not exist')
            command = f'sudo mkdir -p /home/' \
                      f'{user}/.ssh/'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

            Console.info('SCP to ./temp_config')
            result = Host.put(hosts=names,
                              source=temp,
                              destination=".ssh/config")
            _print(result)

            Console.info(f'Chown /home/{user}/.ssh to {user}')
            command = f'sudo chown {user}:{user} /home/' \
                      f'{user}/.ssh/'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

            Console.info(f'Chmod /home/{user}/.ssh to 700')
            command = f'sudo chmod 700 /home/' \
                      f'{user}/.ssh/'
            result = Host.ssh(hosts=names, command=command)
            _print(result)

    elif arguments.config and arguments.ips:

        # BUG FIX: removed leftover debug output print("NNNNNNNN")
        key = arguments.key or "~/.ssh/id_rsa.pub"

        result = Host.config(hosts=arguments.NAMES,
                             ips=arguments.IPS,
                             username=arguments.user,
                             key=key)
        print(result)

        """
        host config NAMES --ips=IPS [--user=USER] [--key=PUBLIC]
        host config --proxy=PROXY NAMES [--user=USER] [--append] [--local=no] [--StrictHostKeyChecking=no]
        host config NAMES [--user=USER] [--append] [--local=no] [--StrictHostKeyChecking=no]
        """

    elif arguments.config:

        if str_bool(arguments.local):
            local_str = ".local"
        else:
            local_str = ""

        if str_bool(arguments.StrictHostKeyChecking):
            strict_host_str = "yes"
        else:
            strict_host_str = "no"

        names = Parameter.expand(arguments.NAMES)
        user = arguments.user

        if arguments.cluster:
            cluster = arguments.cluster
        else:
            # take the first name and strip its digits to derive a label
            cluster = ''.join([i for i in names[0] if not i.isdigit()])

        ssh_config_output = f'\n##### CLOUDMESH PROXY CONFIG {cluster} #####\n\n'

        if arguments.proxy:
            proxy_host = arguments.proxy
            proxy_jump = f'    ProxyJump {proxy_host}\n'
            ssh_config_output += \
                f'Host {proxy_host}\n' \
                f'    HostName {proxy_host}{local_str}\n' \
                f'    User {user}\n' \
                f'    PreferredAuthentications publickey\n' + \
                f'    StrictHostKeyChecking {strict_host_str}\n'
            ssh_config_output += '\n'
        else:
            proxy_jump = ""

        """
        ssh_config_output = f'\n##### CLOUDMESH PROXY CONFIG {cluster} #####\n\n'\
                            f'Host {proxy_host}\n' \
                            f'    HostName {proxy_host}{local_str}\n' \
                            f'    User {user}\n' \
                            f'    StrictHostKeyChecking {strict_host_str}\n\n'
        """

        # the local_str in the hostname may be wrong since its not manager
        for name in names:
            ssh_config_template = f'Host {name}\n' \
                                  f'    HostName {name}{local_str}\n' \
                                  f'    User {user}\n' \
                                  f'    PreferredAuthentications publickey\n' + \
                                  f'    StrictHostKeyChecking {strict_host_str}\n' + \
                                  proxy_jump
            ssh_config_template += '\n'
            ssh_config_output += ssh_config_template

        ssh_config_output += f'##### CLOUDMESH PROXY CONFIG {cluster} #####\n'
        print('Adding to ~/.ssh/config')
        print(ssh_config_output)

        if not os.path.exists(path_expand('~/.ssh/config')):
            with open(path_expand('~/.ssh/config'), 'w+') as f:
                f.write(ssh_config_output)
        else:
            f = open(path_expand('~/.ssh/config'), 'r')
            lines = f.readlines()
            f.close()
            with open(path_expand('~/.ssh/config'), 'w+') as f:
                if f'##### CLOUDMESH PROXY CONFIG {cluster} #####\n' in lines:
                    # find the first and last marker lines so the previous
                    # cloudmesh section can be replaced in place
                    start = lines.index(
                        f'##### CLOUDMESH PROXY CONFIG {cluster} #####\n')
                    lines.reverse()
                    end = lines.index(
                        f'##### CLOUDMESH PROXY CONFIG {cluster} #####\n')
                    end = len(lines) - end - 1
                    lines.reverse()
                    original_config = lines[start:end + 1]
                    del lines[start:end + 1]
                    f.writelines(lines)
                    if arguments.append:
                        f.writelines(original_config)
                        f.write(ssh_config_output)
                    else:
                        f.write(ssh_config_output)
                else:
                    f.writelines(lines)
                    f.write(ssh_config_output)
            # BUG FIX: removed redundant f.close(); the with statement
            # already closed the file

    elif arguments.check:

        key = arguments.key or "~/.ssh/id_rsa.pub"

        result = Host.check(hosts=arguments.NAMES,
                            username=arguments.user,
                            key=key)
        # a host is reachable when it reports its own hostname back
        for entry in result:
            entry['success'] = entry['stdout'] == entry['host']

        _print(result)

    elif arguments.tunnel and arguments.create:

        wlan_ip = Shell.run("hostname -I | awk '{print $2}'").strip()
        print(f'\nUsing wlan0 IP = {wlan_ip}')
        hostname = Shell.run("hostname").strip()
        print(f'Using cluster hostname = {hostname}')

        names = Parameter.expand(arguments.NAMES)
        port = arguments.port or "8001"

        ssh_config_output = f'Host {hostname}\n' \
                            f'    HostName {hostname}.local\n' \
                            f'    User pi\n\n'

        for name in names:
            service_name = f"autossh-{name}.service"
            # systemd unit keeping a local-port -> worker:22 forward alive
            service_template = "[Unit]\n" \
                               f"Description=AutoSSH tunnel service to {name} on local port " \
                               f"{port}\n" \
                               "After=multi-user.target\n\n" \
                               "[Service]\n" \
                               "User=pi\n" \
                               "Group=pi\n" \
                               'Environment="AUTOSSH_GATETIME=0"\n' \
                               'ExecStart=/usr/bin/autossh -M 0 -o "ServerAliveInterval 30" ' \
                               '-o "ServerAliveCountMax 3" -i ' \
                               '/home/pi/.ssh/id_rsa -NL ' \
                               f'{wlan_ip}:{port}:localhost:22 p' \
                               f'i@{name}\n\n' \
                               "[Install]\n" \
                               "WantedBy=multi-user.target"

            ssh_config_template = f'Host {name}\n' \
                                  f'    HostName {hostname}.local\n' \
                                  f'    User pi\n' \
                                  f'    Port {port}\n\n'
            ssh_config_output += ssh_config_template

            Sudo.writefile(f'/etc/systemd/system/{service_name}',
                           service_template)
            port = str(int(port) + 1)

        os.system('sudo systemctl daemon-reload')
        for name in names:
            servicename = f"autossh-{name}.service"
            os.system(f'sudo systemctl start {servicename}')
            os.system(f'sudo systemctl enable {servicename}')

        print('\nTunnels created.\n\nPlease place the following in your '
              'remote machine\'s (i.e. laptop) ~/.ssh/config file to '
              'alias simple ssh access (i.e. ssh red001).')
        banner('copy to ~/.ssh/config on remote host (i.e laptop)')
        print(ssh_config_output)

    elif arguments.shutdown or arguments.reboot:

        if arguments.shutdown:
            command = 'sudo shutdown -h now'
        elif arguments.reboot:
            command = 'sudo reboot'

        names = Parameter.expand(arguments.NAMES)
        hostname = Shell.run("hostname").strip()

        # localhost/the manager must go down last, after the workers
        localhost = None
        if "localhost" in names:
            names.remove("localhost")
            localhost = True
        if hostname in names:
            names.remove(hostname)
            localhost = True

        manager, workers = Host.get_hostnames(names)

        if workers:
            Console.info(f'Executing `{command}` for {workers}')
            Host.ssh(hosts=workers, command=command)
        if manager:
            Console.info(f'Executing `{command}` for {manager}')
            Host.ssh(hosts=manager, command=command)

        # _print(results)
        # results can be misleading becuase there is a race between the
        # shutdown and the error code being returned from the ssh processes.

        if localhost:
            os.system(command)

    elif arguments.adduser:

        names = Parameter.expand(arguments.NAMES)
        user = arguments.USER

        localhost = None
        if 'localhost' in names:
            localhost = 'localhost'
        elif platform.node() in names:
            localhost = platform.node()

        if localhost in names:
            print('\nAdding user to localhost')
            result = Shell.run(f'sudo adduser {user} '
                               f'--disabled-password '
                               f'--gecos "" ')
            print(result)
            names.remove(localhost)

        if len(names) > 0:
            command = f"sudo adduser {user} --disabled-password --gecos ',' "
            results = Host.ssh(hosts=names, command=command)
            _print(results)

    elif arguments.passwd:

        names = Parameter.expand(arguments.NAMES)
        user = arguments.USER

        localhost = None
        if 'localhost' in names:
            localhost = 'localhost'
        elif platform.node() in names:
            localhost = platform.node()

        if localhost in names:
            print("\nSetting password on localhost, please provide user "
                  "password")
            result = os.system(f'sudo passwd {user}')
            print(result)
            names.remove(localhost)

        if len(names) > 0:
            print("\nSetting password on remote hosts, please enter user "
                  "password\n")
            password = getpass("Please enter the user password: ")
            command = f'echo -e "{password}\n{password}" | sudo passwd {user}'
            results = Host.ssh(hosts=names, command=command)
            _print(results)

    elif arguments.addsudo:

        names = Parameter.expand(arguments.NAMES)
        user = arguments.USER

        localhost = None
        if 'localhost' in names:
            localhost = 'localhost'
        elif platform.node() in names:
            localhost = platform.node()

        if localhost in names:
            print('\nAdding user to sudo group on localhost')
            result = Shell.run(f'sudo adduser {user} sudo')
            print(result)
            names.remove(localhost)

        if len(names) > 0:
            command = f'sudo adduser {user} sudo'
            results = Host.ssh(hosts=names, command=command)
            _print(results)

    elif arguments.deluser:

        names = Parameter.expand(arguments.NAMES)
        user = arguments.USER

        # BUG FIX: localhost was referenced before assignment when neither
        # 'localhost' nor the node name appeared in names
        localhost = None
        if 'localhost' in names:
            localhost = 'localhost'
        elif platform.node() in names:
            localhost = platform.node()

        if localhost in names:
            print('\nDeleting user on localhost')
            result = Shell.run(f'sudo userdel -r {user}')
            print(result)
            names.remove(localhost)

        if len(names) > 0:
            command = f'sudo userdel -r {user}'
            results = Host.ssh(hosts=names, command=command)
            _print(results)

    return ""
def do_burn(self, args, arguments):
    """
    ::

      Usage:
        burn gui [--hostname=HOSTNAME]
                 [--ip=IP]
                 [--ssid=SSID]
                 [--wifipassword=PSK]
                 [--bs=BLOCKSIZE]
                 [--dryrun]
                 [--no_diagram]
        burn ubuntu NAMES [--inventory=INVENTORY] [--ssid=SSID] [-f]
                          [--wifipassword=PSK] [-v] --device=DEVICE
                          [--country=COUNTRY] [--upgrade]
        burn raspberry NAMES --device=DEVICE
                             [--inventory=INVENTORY]
                             [--ssid=SSID]
                             [--wifipassword=PSK]
                             [--country=COUNTRY]
                             [--password=PASSWORD]
                             [-v]
                             [-f]
        burn firmware check
        burn firmware update
        burn install
        burn load --device=DEVICE
        burn format --device=DEVICE
        burn imager [TAG...]
        burn mount [--device=DEVICE] [--os=OS]
        burn unmount [--device=DEVICE] [--os=OS]
        burn network list [--ip=IP] [--used]
        burn network
        burn info [--device=DEVICE]
        burn image versions [--details] [--refresh] [--yaml]
        burn image ls
        burn image delete [--image=IMAGE]
        burn image get [--url=URL] [TAG...]
        burn backup [--device=DEVICE] [--to=DESTINATION]
        burn copy [--device=DEVICE] [--from=DESTINATION]
        burn shrink [--image=IMAGE]
        burn cluster --device=DEVICE --hostname=HOSTNAME
                     [--burning=BURNING]
                     [--ip=IP]
                     [--ssid=SSID]
                     [--wifipassword=PSK]
                     [--bs=BLOCKSIZE]
                     [--os=OS]
                     [-y]
                     [--imaged]
                     [--set_passwd]
        burn create [--image=IMAGE]
                    [--device=DEVICE]
                    [--burning=BURNING]
                    [--hostname=HOSTNAME]
                    [--ip=IP]
                    [--sshkey=KEY]
                    [--blocksize=BLOCKSIZE]
                    [--passwd=PASSWD]
                    [--ssid=SSID]
                    [--wifipassword=PSK]
                    [--format]
                    [--tag=TAG]
                    [--inventory=INVENTORY]
                    [--name=NAME]
                    [-y]
        burn sdcard [TAG...] [--device=DEVICE] [-y]
        burn set [--hostname=HOSTNAME]
                 [--ip=IP]
                 [--key=KEY]
                 [--keyboard=COUNTRY]
                 [--cmdline=CMDLINE]
        burn enable ssh
        burn wifi --ssid=SSID [--passwd=PASSWD] [--country=COUNTRY]
        burn check [--device=DEVICE]
        burn mac --hostname=HOSTNAME

      Options:
        -h --help              Show this screen.
        --version              Show version.
        --image=IMAGE          The image filename,
                               e.g. 2019-09-26-raspbian-buster.img
        --device=DEVICE        The device, e.g. /dev/sdX
        --hostname=HOSTNAME    The hostnames of the cluster
        --ip=IP                The IP addresses of the cluster
        --key=KEY              The name of the SSH key file
        --blocksize=BLOCKSIZE  The blocksize to burn [default: 4M]
        --burning=BURNING      The hosts to be burned

      Arguments:
          TAG                  Keyword tags to identify an image

      Files:
        This is not fully thought through and needs to be documented

        ~/.cloudmesh/images
          Location where the images will be stored for reuse

      Description:

        cms burn create --inventory=INVENTORY --device=DEVICE --name=NAME

          Will refer to a specified cloudmesh inventory file (see cms help
          inventory). Will search the configurations for NAME inside of
          INVENTORY and will burn to DEVICE. Supports parameter expansion.

        cms burn create --passwd=PASSWD

          if the passwd flag is added the default password is
          queried from the commandline and added to all SDCards

          if the flag is omitted login via the password is
          disabled and only login via the sshkey is allowed

        Network

        cms burn network list

            Lists the ip addresses that are on the same network

            +------------+---------------+----------+-----------+
            | Name       | IP            | Status   | Latency   |
            |------------+---------------+----------+-----------|
            | Router     | 192.168.1.1   | up       | 0.0092s   |
            | iPhone     | 192.168.1.4   | up       | 0.061s    |
            | red01      | 192.168.1.46  | up       | 0.0077s   |
            | laptop     | 192.168.1.78  | up       | 0.058s    |
            | unknown    | 192.168.1.126 | up       | 0.14s     |
            | red03      | 192.168.1.158 | up       | 0.0037s   |
            | red02      | 192.168.1.199 | up       | 0.0046s   |
            | red        | 192.168.1.249 | up       | 0.00021s  |
            +------------+---------------+----------+-----------+

        cms burn network list [--used]

            Lists the used ip addresses as a comma separated parameter
            list

               192.168.50.1,192.168.50.4,...

        cms burn network address

            Lists the own network address

            +---------+----------------+----------------+
            | Label   | Local          | Broadcast      |
            |---------+----------------+----------------|
            | wlan0   | 192.168.1.12   | 192.168.1.255  |
            +---------+----------------+----------------+

        cms burn firmware check

            Checks if the firmware on the Pi is up to date

        cms burn firmware update

            Checks and updates the firmware on the Pi

        cms burn install

            Installs a program to shrink img files. This is useful,
            after you created a backup to make the backup smaller
            and allow faster burning in case of recovery

            This command is not supported on MacOS

        cms burn load --device=DEVICE

            Loads the sdcard into the USB drive. This is similar to
            loading a cdrom drive. It is the opposite to eject

        cms burn format --device=DEVICE

            Formats the SDCard in the specified device. Be careful
            it is the correct device. cms burn info will help you
            to identify it

        cms burn mount [--device=DEVICE] [--os=OS]

            Mounts the file systems available on the SDCard

        cms burn unmount [--device=DEVICE] [--os=OS]

            Unmounts the mounted file systems from the SDCard

        cms burn info [--device=DEVICE]

            Provides useful information about the SDCard

        cms burn image versions [--refresh] [--yaml]

            The images that you like to burn onto your SDCard can be
            cached locally with the image command. The available images
            for the PI can be found when using the --refresh option. If
            you do not specify it it reads a copy of the image list from
            our cache

        cms burn image ls

            Lists all downloaded images in our cache. You can download
            them with the cms burn image get command

        cms burn image delete [--image=IMAGE]

            Deletes the specified image. The name can be found with the
            image ls command

        cms burn image get [--url=URL] [TAG...]

            Downloads a specific image or the latest image. The tag are
            a number of words separated by a space that must occur in
            the tag that you find in the versions command

        cms burn backup [--device=DEVICE] [--to=DESTINATION]

            This command requires you to install pishrink previously
            with cms burn install

            Backs up a SDCard to the given location.

        cms burn copy [--device=DEVICE] [--from=DESTINATION]

            Copies the file form the destination on the SDCard this is
            the same as the SDCard command. we will in future remove one

        cms burn shrink [--image=IMAGE]

            Shrinks the size of a backup or image file that is on your
            local file system. It can only be used for .img files

            This command is not supported on MacOS.

        cms burn create [--image=IMAGE]
                        [--device=DEVICE]
                        [--hostname=HOSTNAME]
                        [--ip=IP]
                        [--sshkey=KEY]
                        [--blocksize=BLOCKSIZE]
                        [--passwd=PASSWD]
                        [--ssid=SSID]
                        [--wifipassword=PSK]
                        [--format]

            This command not only can format the SDCard, but
            also initializes it with specific values

        cms burn sdcard [TAG...] [--device=DEVICE]

            this burns the sd card, see also copy and create

        cms burn set [--hostname=HOSTNAME]
                     [--ip=IP]
                     [--key=KEY]
                     [--mount=MOUNTPOINT]
                     [--keyboard=COUNTRY]
                     [--cmdline=CMDLINE]

            Sets specific values on the sdcard after it has been
            created with the create, copy or sdcard command

            a --ssh is missing from this command

        cms burn enable ssh [--mount=MOUNTPOINT]

            Enables the ssh server once it is booted

        cms burn wifi --ssid=SSID [--passwd=PASSWD] [--country=COUNTRY]

            Sets the wifi ssid and password after the card is created,
            copied, or the sdcard is used.

            The option country option expects an ISO 3166-1 two digit
            country code. The default is "US" and the option not
            required if suitable. See
            https://en.wikipedia.org/wiki/ISO_3166-1 for other countries.

        cms burn check [--device=DEVICE]

            Lists the parameters that were set with the set or create
            command

      Examples: ( \\ is not shown)

        > cms burn create --image=2019-09-26-raspbian-buster-lite
        >                 --device=/dev/mmcblk0
        >                 --hostname=red[5-7]
        >                 --ip=192.168.1.[5-7]
        >                 --sshkey=id_rsa

        > cms burn image get latest

        > cms burn image get https://downloads.raspberrypi.org/
        >   raspbian_lite/images/
        >   raspbian_lite-2018-10-11/2018-10-09-raspbian-stretch-lite.zip

        > cms burn image delete 2019-09-26-raspbian-buster-lite
    """
    # Map the docopt --flags onto attribute-style access
    # (arguments["--device"] becomes arguments.device, ...).
    map_parameters(arguments, "details", "refresh", "device", "dryrun",
                   "burning", "hostname", "ip", "sshkey", "blocksize",
                   "ssid", "url", "imaged", "key", "keyboard", "passwd",
                   "wifipassword", "version", "to", "os", "country",
                   "inventory", "name", "bs", "set_passwd", "cmdline",
                   "upgrade", "no_diagram")
    # Keys that map_parameters cannot translate (python keywords / reserved
    # docopt forms) are aliased by hand.
    # arguments.MOUNTPOINT = arguments["--mount"]
    arguments.FORMAT = arguments["--format"]
    arguments.FROM = arguments["--from"]
    arguments.IMAGE = arguments["--image"]
    arguments.output = "table"  # hard coded for now
    arguments.bs = arguments.bs or "4M"
    arguments.yes = arguments["-y"]

    if len(arguments.TAG) == 0:
        arguments.TAG = "latest"

    # VERBOSE(arguments)

    def execute(label, function):
        # NOTE: `function` is the already-evaluated result of the call at
        # the call site (not a callable), so the StopWatch only brackets
        # the bookkeeping, not the actual work.
        StopWatch.start(label)
        result = function
        StopWatch.stop(label)
        StopWatch.status(label, True)
        return result

    burner = Burner()
    sdcard = SDCard()

    if arguments.imager:
        arguments.TAG = arguments.TAG or ["latest-lite"]
        Console.msg(f"Tags: {arguments.TAG}")
        try:
            file = Imager.fetch(tag=arguments.TAG)
        except:  # noqa: E722
            pass
        try:
            Imager.launch(file=file)
        except Exception as e:
            # also reached when the fetch above failed and `file` is unbound
            Console.error(
                f"could not find image with the tag {arguments.TAG}\n\n{e}\n")
        return ""

    elif arguments.gui:
        from cloudmesh.burn.gui import Gui
        VERBOSE(arguments)
        g = Gui(hostname=arguments.hostname,
                ip=arguments.ip,
                dryrun=arguments.dryrun,
                no_diagram=arguments.no_diagram)
        g.run()
        return ""

    elif arguments.raspberry:
        banner(txt="RaspberryOS Burn", figlet=True)
        if arguments.inventory:
            inv_path = path_expand(f'~/.cloudmesh/{arguments.inventory}')
            try:
                burner = RaspberryBurner(
                    inventory=inv_path,
                    ssid=arguments['--ssid'],
                    wifipassword=arguments['--wifipassword'],
                    country=arguments['--country'])
            except:  # noqa: E722
                Console.error('Burner Error')
                return ""
        else:
            try:
                burner = RaspberryBurner(
                    names=arguments.NAMES,
                    ssid=arguments['--ssid'],
                    wifipassword=arguments['--wifipassword'],
                    force_inv=arguments['-f'],
                    country=arguments['--country'])
            except Exception as e:
                Console.error('Burner Error')
                # BUG FIX: an unreachable `return ""` after the raise was
                # removed
                raise e
        execute("burn raspberry",
                burner.multi_burn(
                    names=arguments.NAMES,
                    devices=arguments.device,
                    verbose=arguments['-v'],
                    password=arguments['--password'],
                ))
        return ""

    elif arguments.ubuntu:
        banner(txt="Ubuntu Burn with cloud-init", figlet=True)

        names = Parameter.expand(arguments.NAMES)
        if len(Parameter.expand(arguments.device)) > 1:
            Console.error("Too many devices specified. "
                          "Please only specify one")
            return ""

        if arguments.inventory:
            c = Configure(inventory=arguments.inventory,
                          debug=arguments['-v'])
            inv = Inventory(filename=arguments.inventory)
        else:
            # no inventory given: derive one from the host names
            names = Parameter.expand(arguments.NAMES)
            manager, workers = Host.get_hostnames(names)
            if workers:
                # e.g. red001 -> red
                worker_base_name = ''.join(
                    [i for i in workers[0] if not i.isdigit()])
            cluster_name = manager or worker_base_name
            inventory = path_expand(
                f'~/.cloudmesh/inventory-{cluster_name}.yaml')
            if not os.path.exists(inventory) or arguments['-f']:
                if not manager:
                    Console.error("No inventory found. Can not create an "
                                  "inventory without a manager.")
                    return ""
                Inventory.build_default_inventory(
                    filename=inventory,
                    manager=manager,
                    workers=workers,
                    manager_image='ubuntu-20.10-64-bit',
                    worker_image='ubuntu-20.10-64-bit')
            c = Configure(inventory=inventory,
                          debug=arguments['-v'],
                          download_images=True)
            inv = Inventory(filename=inventory)

        names = Parameter.expand(arguments.NAMES)
        manager, workers = Host.get_hostnames(names)

        if manager:
            # interactively collect wifi credentials if the manager has the
            # wifi service configured and nothing was passed on the cli
            if not arguments.ssid and \
                    'wifi' in c.configs[manager]['services']:
                arguments.ssid = get_ssid()
                if arguments.ssid == "":
                    Console.info('Could not determine SSID, skipping wifi '
                                 'config')
                    arguments.ssid = None
            if not arguments.wifipassword and arguments.ssid is not None:
                arguments.country = Shell.locale().upper()
                arguments.wifipassword = getpass(
                    f"Using --SSID={arguments.ssid} and "
                    f" --COUNTRY={arguments.country}, please "
                    f"enter wifi password:")

        # NOTE(review): the original selection of `tag` was unreadable
        # (redacted) in the source; it is assumed to be the requested TAG
        # list -- TODO confirm against the repository history
        tag = arguments.TAG
        if 'ubuntu' not in tag:
            Console.error("This command only supports burning ubuntu cards")
            return ""

        sdcard = SDCard(card_os="ubuntu")

        # Code below taken from arguments.sdcard
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""

        # determine if we are burning a manager, as this needs to be done
        # first to get the ssh public key
        # manager = False
        # for name in names:
        #     if not inv.has_host(name):
        #         Console.error(
        #             f'Could not find {name} in inventory {inv.filename}')
        #         return ""
        #     service = inv.get(name=name, attribute='service')
        #     if service == 'manager' and not manager:
        #         manager = name
        #         # make manager first in names
        #         names.remove(name)
        #         names.insert(0, name)
        #     elif service == 'manager' and manager:
        #         raise Exception('More than one manager detected in NAMES')

        for name in names:
            if not yn_choice(
                    f'Is the card to be burned for {name} inserted?'):
                # BUG FIX: user-facing typo "terminante" corrected
                if not yn_choice(
                        f"Please insert the card to be burned for {name}. "
                        "Type 'y' when done or 'n' to terminate"):
                    Console.error("Terminating: User Break")
                    return ""

            service = inv.get(name=name, attribute='service')
            # Make sure bridge is only enabled if WiFi enabled
            if service == 'manager':
                services = inv.get(name=name, attribute='services')
                if 'bridge' in services and not arguments.ssid:
                    Console.error(
                        'Service bridge can only be configured if WiFi'
                        ' is enabled with --ssid and --wifipassword')
                    return ""
                else:
                    enable_bridge = 'bridge' in services

            Console.info(f'Burning {name}')
            sdcard.format_device(device=arguments.device, yes=True)
            sdcard.unmount(device=arguments.device)
            sdcard.burn_sdcard(tag=tag, device=arguments.device, yes=True)
            sdcard.mount(device=arguments.device, card_os="ubuntu")

            if service == 'manager':
                # Generate a private public key pair for the manager that
                # will be persistently used
                # priv_key, pub_key = c.generate_ssh_key(name)
                # Write priv_key and pub_key to /boot/id_rsa and
                # /boot/id_rsa.pub
                # SDCard.writefile(filename=f'{sdcard.boot_volume}/id_rsa',
                #                  content=priv_key)
                # SDCard.writefile(
                #     filename=f'{sdcard.boot_volume}/id_rsa.pub',
                #     content=pub_key)
                c.build_user_data(
                    name=name,
                    country=arguments.country,
                    upgrade=arguments.upgrade,
                    with_bridge=enable_bridge).write(
                    filename=sdcard.boot_volume + '/user-data')
                c.build_network_data(
                    name=name,
                    ssid=arguments.ssid,
                    password=arguments.wifipassword).write(
                    filename=sdcard.boot_volume + '/network-config')
            else:
                c.build_user_data(
                    name=name,
                    add_manager_key=manager,
                    upgrade=arguments.upgrade).write(
                    filename=sdcard.boot_volume + '/user-data')
                c.build_network_data(name=name).write(
                    filename=sdcard.boot_volume + '/network-config')

            # Sleep for 1 second to give ample time for writing to finish
            time.sleep(1)
            sdcard.unmount(device=arguments.device, card_os="ubuntu")
            Console.info("Remove card")

        Console.ok(f"Burned {len(names)} card(s)")
        return ""

    elif arguments.firmware and arguments.check:
        execute("firmware check", burner.firmware(action="check"))
        return ""

    elif arguments.firmware and arguments.update:
        execute("firmware update", burner.firmware(action="update"))
        return ""

    if arguments.check:
        execute("check", burner.check(device=arguments.device))
        return ""

    elif arguments.versions and arguments['image']:
        StopWatch.start("image versions")
        result = Image.create_version_cache(refresh=arguments["--refresh"])
        output = "table"
        if arguments["--yaml"]:
            output = "yaml"
        order = ["tag", 'date', "os", "type", 'version']
        header = ["Tag", 'Date', "OS", "Type", 'Version']
        if arguments.details:
            order = ["tag", 'date', "os", "type", 'version', "url"]
            header = ["Tag", 'Date', "OS", "Type", 'Version', "Url"]
        print(Printer.write(result,
                            order=order,
                            header=header,
                            output=output))
        StopWatch.stop("image versions")
        StopWatch.status("image versions", True)
        return ""

    elif arguments.load:
        execute("load", sdcard.load_device(device=arguments.device))
        return ""

    elif arguments["format"]:
        # as format is a python word, we need to use an index
        execute("format",
                sdcard.format_device(device=arguments.device, unmount=True))
        return ""

    elif arguments.network and arguments["list"]:
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        ip = arguments.ip or Network.address()[0]['local']
        details = Network.nmap(ip=ip)
        if arguments.used:
            print(','.join([x['ip'] for x in details]))
        else:
            print(Printer.write(details,
                                order=['name', "ip", "status", "latency"],
                                header=['Name', "IP", "Status", "Latency"]))
        return ""

    elif arguments.network:
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        # print(Network.nmap())
        details = Network.address()
        print(Printer.write(details,
                            order=['label', "local", "broadcast"],
                            header=["Label", "Local", "Broadcast"]))
        return ""

    elif arguments.wifi:
        password = arguments.passwd
        ssid = arguments.ssid or get_ssid()
        country = arguments.country

        if password is None:
            password = getpass("Please enter the Wifi password or enter "
                               "for no password: ")
        # NOTE(review): a few lines after the password prompt were
        # unreadable (redacted) in the source; presumably an empty entry is
        # mapped to "no password" -- TODO confirm
        if password == "":
            password = None

        host = None
        if os_is_mac():
            host = "macos"
        elif os_is_linux():
            host = "linux"
        elif os_is_pi():
            host = "raspberry"
        else:
            Console.error("This command is not yet implemented for your OS")
            return ""
        burner.configure_wifi(ssid,
                              psk=password,
                              country=country,
                              host=host)
        return ""

    elif arguments.info:
        output = arguments.output or "table"
        card = SDCard()
        execute("info", card.info(output=output))
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        return ""

    elif arguments.install:
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        execute("install", burner.install())
        return ""

    elif arguments.shrink:
        if os_is_mac():
            Console.error("Not yet implemented on MacOS")
            return ""
        execute("shrink", burner.shrink(image=arguments.IMAGE))
        return ""

    elif arguments.backup:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        execute("backup",
                sdcard.backup(device=arguments.device,
                              to_file=arguments.to))
        return ""

    elif arguments["copy"]:
        # as copy is a reserved word we need to use the index
        USB.check_for_readers()
        execute("copy",
                sdcard.copy(device=arguments.device,
                            from_file=arguments.FROM))
        return ""

    elif arguments.sdcard:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        if arguments.device is None:
            card = SDCard()
            card.info()
            Console.error("Please specify a device")
            return ""
        arguments.TAG = arguments.TAG or ["latest-lite"]
        if any("ubuntu" in tag for tag in arguments.TAG):
            sdcard = SDCard(card_os="ubuntu")
        execute("format",
                sdcard.format_device(device=arguments.device, unmount=True))
        execute("unmount", sdcard.unmount(device=arguments.device))
        execute("sdcard",
                sdcard.burn_sdcard(tag=arguments.TAG,
                                   device=arguments.device,
                                   yes=arguments.yes))
        return ""

    elif arguments.mount:
        if arguments.device is None:
            # BUG FIX: was `card = SDCard` (the class, not an instance),
            # which made card.info() fail
            card = SDCard()
            card.info()
            Console.error("Please specify a device")
            return ""
        execute("mount",
                sdcard.mount(device=arguments.device,
                             card_os=arguments.os))
        return ""

    elif arguments.unmount:
        card = SDCard(card_os=arguments.os)
        execute("unmount",
                card.unmount(device=arguments.device,
                             card_os=arguments.os))
        return ""

    elif arguments.mac:
        hostnames = Parameter.expand(arguments.hostname)
        execute("mac", burner.mac(hostnames=hostnames))
        return ""

    elif arguments.set:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        if arguments.hostname:
            execute("set hostname", burner.set_hostname(arguments.hostname))
        if arguments.ip:
            execute("set ip", burner.set_static_ip(arguments.ip))
        if arguments.key:
            execute("set key", burner.set_key(arguments.key))
        if arguments.keyboard:
            execute("set keyboard",
                    burner.keyboard(country=arguments.keyboard))
        if arguments.cmdline:
            execute("set cmdline", burner.set_cmdline(arguments.cmdline))
        return ""

    elif arguments.enable and arguments.ssh:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        execute("enable ssh", burner.enable_ssh())
        return ""

    # elif arguments.versions and arguments.image:
    #     image = Image()

    elif arguments.ls and arguments['image']:
        execute("image ls", Image().ls())
        return ""

    elif arguments.delete and arguments.IMAGE:
        execute("image rm", Image().rm(arguments.IMAGE))
        return ""

    elif arguments["get"] and arguments['image'] and arguments["--url"]:
        image = Image()
        execute("image fetch", image.fetch(url=arguments.url))
        return ""

    elif arguments["get"] and arguments['image'] and arguments["TAG"]:
        tag = arguments["TAG"]
        if "latest" in tag and ("full" in tag or "lite" in tag):
            result = Image.create_version_cache(
                refresh=arguments["--refresh"])
        image = Image()
        execute("image fetch", image.fetch(tag=arguments["TAG"]))
        return ""

    elif arguments["get"] and arguments['image']:
        image = Image()
        execute("image fetch", image.fetch(tag="latest"))
        return ""

    elif arguments.cluster:
        # is true when
        #
        #   cms burn cluster --hostname=red,red00[1-2]
        #                    --device=/dev/sdb
        #                    --ip=10.1.1.[1-3]
        #                    --ssid=myssid
        #                    --wifipassword=mypass
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        execute("cluster", burner.cluster(arguments=arguments))
        return ""

    elif arguments.create and arguments.inventory:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""
        if not os_is_pi():
            print()
            Console.error(
                "This command has only been written for a Raspberry Pis. "
                "Terminating for caution")
            print()
            if yn_choice("Continue anyways?"):
                pass
            else:
                return
        if not arguments.name:
            Console.error(
                "Missing --name parameter. See cms help burn for usage")
            return ""
        if not arguments.device:
            Console.error(
                "Missing --device parameter. See cms help burn for usage")
            return ""
        StopWatch.start("burn inventory")
        multi_burner = MultiBurner()
        # Perhaps we want to change the path at some point
        inventory = f"~/.cloudmesh/{arguments.inventory}"
        multi_burner.burn_inventory(inventory=inventory,
                                    name=arguments.name,
                                    device=arguments.device,
                                    yes=arguments.yes,
                                    passwd=arguments.passwd)
        StopWatch.stop("burn inventory")
        StopWatch.status("burn inventory", True)
        StopWatch.benchmark(sysinfo=False, csv=False)
        return ""

    elif arguments.create:
        try:
            USB.check_for_readers()
        except Exception as e:
            print()
            Console.error(e)
            print()
            return ""

        # password priority: --passwd, then $PASSWD, then a generated one
        if arguments["--passwd"]:
            passwd = arguments["--passwd"]
        elif "PASSWD" in os.environ:
            passwd = os.environ["PASSWD"]
        else:
            passwd = generate_strong_pass()

        psk = None
        if arguments["--ssid"]:
            ssid = arguments["--ssid"]
            if arguments["--wifipassword"]:
                psk = arguments["--wifipassword"]
            else:
                psk = None
        else:
            if arguments["--wifipassword"]:
                print("Can't have wifi password with no ssid")
                return
            else:
                ssid = None

        # BUG FIX: was `image = 'latest' or arguments.IMAGE`, which always
        # evaluates to 'latest' and silently ignores --image
        image = arguments.IMAGE or 'latest'

        dev = os.environ['DEV'] if 'DEV' in os.environ else None
        devices = arguments["--device"] or dev or None

        if devices is not None:
            devices = Parameter.expand_string(devices)

        hostnames = Parameter.expand(arguments.hostname)

        # BUG FIX: was `arguments.burnimg`, an attribute that is never set
        # anywhere, so --burning was always ignored
        if arguments.burning is None:
            burning = hostnames
        else:
            burning = arguments.burning

        VERBOSE(arguments)

        ips = None if not arguments.ip else Parameter.expand(arguments.ip)
        key = arguments.sshkey
        tag = arguments['--tag']

        if os_is_pi() or os_is_linux():
            blocksize = arguments.blocksize
            StopWatch.start("total")
            multi = MultiBurner()
            multi.burn_all(
                burning=burning,
                image=image,
                device=devices,
                blocksize=blocksize,
                progress=True,
                hostnames=hostnames,
                # no difference between names and name,
                # maybe we should align
                ips=ips,
                key=key,
                password=passwd,
                ssid=ssid,
                psk=psk,
                tag=tag,
                yes=arguments.yes)
            StopWatch.stop("total")
            StopWatch.status("total", True)
            StopWatch.benchmark(sysinfo=False, csv=False)
        else:
            # BUG FIX: user-facing typo "ona" corrected
            Console.error("This command is only supported on a Pi and Linux")
        return ""

    Console.error("see manual page: cms help burn")
    return ""
def do_jupyter(self, args, arguments):
    """
    ::

      Usage:
            jupyter lab HOST PORT [DIR]
            jupyter tunnel HOST PORT
            jupyter stop HOST PORT
            jupyter open PORT
            jupyter info [PORT]
            jupyter backup

      This command can start a jupyter notebook on a remote machine and
      use it in your browser.

      Arguments:
          USER   The username on the remote machine
          HOST   The hostname of the remote machine
          PORT   The port of the remote machine
          DIR    The directory where the notebooks are located

      Description:

        Step 1: Setting up a .bash_profile file

          If you have your python venv set up you need to add it to the
          .bash_profile on your remote machine. A possible profile file
          could look as follows:

            if [ -f ~/.bash_aliases ]; then
                . ~/.bash_aliases
            fi
            export PATH=$HOME/ENV3/bin:$PATH
            source $HOME/ENV3/bin/activate

        Step 2: Start the remote notebook server in a terminal

          Note: After the start you will not be able to use that terminal

            cms jupyter lab HOST PORT

          This command will also establish an SSH tunnel and open
          jupyter lab in the web browser.
    """
    # NOTE: the Description previously referred to a non-existent
    # `cms jupyter start` command; the actual usage is `jupyter lab`.
    VERBOSE(arguments)

    jupyter = Jupyter(arguments.HOST, arguments.PORT, arguments.DIR)

    if arguments.lab:
        # starts the remote notebook server
        jupyter.start()
    if arguments.tunnel:
        jupyter.tunnel()
    elif arguments.stop:
        jupyter.stop()
        data = jupyter.info()
        print(Printer.attribute(data))
    elif arguments.open:
        jupyter.open()
    elif arguments.test:
        # NOTE(review): `test` has no line in the Usage section above, so
        # docopt can never set it -- TODO confirm and either add a usage
        # line or remove this branch
        jupyter.test()
    elif arguments.info:
        data = jupyter.info()
        print(Printer.attribute(data))
    elif arguments.backup:
        # copy the remote working directory to a timestamped backup name
        data = jupyter.info()
        data.backup = backup_name(data['cwd'])
        print("Generate backup")
        print(f"From: {data.cwd}")
        print(f"To: {data.backup}")
        if yn_choice("Continue"):
            os.system(f"cp -r -v {data.cwd} {data.backup}")
            Console.ok(f"Backup created at: {data.backup}")

    return ""
def do_key(self, args, arguments): """ :: Usage: key -h | --help key list --cloud=CLOUDS [--output=OUTPUT] key list --source=ssh [--dir=DIR] [--output=OUTPUT] key list --source=git [--output=OUTPUT] [--username=USERNAME] key list [--output=OUTPUT] key init key add NAME --filename=FILENAME [--output=OUTPUT] key add [NAME] [--source=FILENAME] key add [NAME] [--source=git] key add [NAME] [--source=ssh] key delete NAMES [--cloud=CLOUDS] [--dryrun] key upload [NAMES] [--cloud=CLOUDS] [--dryrun] key upload [NAMES] [VMS] [--dryrun] key group upload [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun] key group add [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun] key group delete [--group=GROUPNAMES] [NAMES] [--dryrun] key group list [--group=GROUPNAMES] [--output=OUTPUT] key group export --group=GROUNAMES --filename=FILENAME key gen (ssh | pem) [--filename=FILENAME] [--nopass] [--set_path] [--force] key reformat (ssh | pem) [--filename=FILENAME] [--format=FORMAT] [--nopass] [--pub] key verify (ssh | pem) [--filename=FILENAME] [--pub] [--check_pass] Arguments: VMS Parameterized list of virtual machines CLOUDS The clouds NAME The name of the key. SOURCE db, ssh, all OUTPUT The format of the output (table, json, yaml) FILENAME The filename with full path in which the key is located FORMAT Desired key format (SubjectInfo, SSH, OpenSSL, PKCS8) Options: --dir=DIR the directory with keys [default: ~/.ssh] --check_pass Flag where program query user for password --filename=FILENAME the name and full path to the file --nopass Flag indicating if the key has no password --output=OUTPUT the format of the output [default: table] --pub Indicates that the public key is passed in --set_path Sets the cloudmesh encryption key path to the full path of the generated keys --source=SOURCE the source for the keys --username=USERNAME the source for the keys [default: none] Description: Please note that some values are read from the cloudmesh.yaml file. 
One such value is cloudmesh.profile.user Management of public keys is an essential component of accessing virtual machines in the cloud. There are a number of sources where you can find public keys. This includes the ~/.ssh directory and for example github. If you do not already have a public-private key pair they can be generated using cloudmesh key gen ssh This will create the public-private keypair of ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub in OpenSSH format key gen pem This will create the public-private keypair of ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub in PEM format key gen (ssh | pem) --filename=~/.cloudmesh/foobar This will generate the public-private key pair of ~/.cloudmesh/foobar and ~/.cloudmesh/foobar.pub key gen (ssh | pem) --filename=~/.cloudmesh/foobar --set_path This will generate the keys as stated above, but it will also set cloudmesh to use these keys for encryption. Keys can also be verified for their formatting and passwords. By default cloudmesh checks ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub If the key is password protected the formatting can only be verified if the password is provided (--check_pass argument) key verify pem Verifies that ~/.ssh/id_rsa has PEM format key verify ssh --pub Verifies that ~/.ssh/id_rsa.pub has OpenSSH format key verify pem --filename=~/.cloudmesh/foobar Verifies if the private key located at ~/.cloudmesh/foobar is password protected key verify pem --filenam=~/.cloudmesh/foobar --check_pass Request the password to the file, then checks if the key is in proper PEM format You may find the need to keep the values of your keys but different encodings or formats. These aspects of your key can also be changed using cloudmesh. 
key reformat pem Will reformat the ~/.id_rsa.pub key from PEM to OpenSSH key reformat ssh Will reformat the ~/.id_rsa.pub key from OpenSSH to PEM key reformat --filename=~/.id_rsa --format=PKCS8 Will reformat the private key to PKCS8 format Keys will be uploaded into cloudmesh database with the add command under the given NAME. If the name is not specified the name cloudmesh.profile.user is assumed. key add NAME --source=ssh adds the default key in ~/.ssh/id_rsa.pub key add NAME --source=FILENAME adds the key specified by the filename with the given name key add NAME --git --username=username adds a named github key from a user with the given github username. key set adds the ~/.ssh/id_rsa.pub key with the name specified in cloudmesh.profile.user. It also sets the variable key to that user. Once the keys are uploaded to github, they can be listed To list these keys the following list functions are provided. key list --source=git [--username=USERNAME] lists all keys in git for the specified user. If the name is not specified it is read from cloudmesh.yaml key list --source=ssh [--dir=DIR] [--output=OUTPUT] lists all keys in the directory. If the directory is not specified the default will be ~/.ssh key list NAMES lists all keys in the named virtual machines. List command can use the [--output=OUTPUT] option list the keys loaded to cloudmesh in the given format: json, yaml, table. table is default. The NAME can be specified and if omitted the name cloudmesh.profile.user is assumed. To get keys from the cloudmesh database the following commands are available: key delete NAMES deletes the Named keys. This may also have an impact on groups key rename NAME NEW renames the key from NAME to NEW in the cloudmesh database. Group management of keys is an important concept in cloudmesh, allowing multiple users to be added to virtual machines while managing the keys associated with them. The keys must be uploaded to cloudmesh database with a name so they can be used in a group. 
The --dryrun option executes the command without uploading the information to the clouds. If no group name is specified the group name default is assumed. If no cloudnamesh are specified, all active clouds are assumed. active clouds can be set in the cloudmesh.yaml file. key group delete [GROUPNAMES] [NAMES] [--dryrun] deletes the named keys from the named groups. key group list [GROUPNAMES] [--output=OUTPUT] list the key names and details in the group. key group upload [GROUPNAMES] [CLOUDS] [--dryrun] uploads the named groups to the specified clouds. In some cases you may want to store the public keys in files. For this reason we support the following commands. key group add --group=GROUPNAME --file=FILENAME the command adds the keys to the given group. The keys are written in the files in yaml format. key group export --group=GROUNAMES --filename=FILENAME the command exports the keys to the given group. The keys are written in the files in yaml format. The yaml format is as follows: cloudmesh: keys: NAMEOFKEY: name: NAMEOFKEY key: ssh-rsa AAAA..... comment group: - GROUPNAME ... 
If a key is included in multiple groups they will be added to the grouplist of the key """ def print_keys(keys): print( Printer.write( keys, sort_keys=["name"], order=["name", "type", "fingerprint", "comment"], header=["Name", "Type", "Fingerprint", "Comment"], output=arguments.output)) map_parameters(arguments, 'check_pass', 'cloud', 'dir', 'dryrun', 'filename', 'force', 'format', 'name', 'nopass', 'output', 'pub', 'pwd', 'set_path', 'source') variables = Variables() if arguments.list and arguments.source == "git": config = Config() username = config["cloudmesh.profile.github"] keys = SSHkey().get_from_git(username) print_keys(keys) return "" elif arguments.list and arguments.source == "ssh": # this is much simpler sshkey = SSHkey() print_keys([sshkey]) return "" elif arguments.list and arguments.cloud: clouds = Parameter.expand(arguments.cloud) if len(clouds) == 0: variables = Variables() cloudname = variables['cloud'] clouds = [cloudname] keys = [] for cloud in clouds: print(f"cloud {cloud}") provider = Provider(name=cloud) keys = provider.keys() provider.Print(keys, output=arguments.output, kind="key") return "" elif arguments.list: cloud = "local" db = CmDatabase() keys = db.find(collection=f"{cloud}-key") print_keys(keys) return "" elif arguments.add: """ key add [NAME] [--source=FILENAME] # NOT IMPLEMENTED YET key add [NAME] [--source=git] key add [NAME] [--source=ssh] """ key = Key() if arguments["--source"] == "ssh": name = arguments.NAME or "ssh" key.add(name, "ssh") elif arguments["--source"] == "git": name = arguments.NAME or "git" key.add("git", "git") else: config = Config() name = config["cloudmesh.profile.user"] kind = "ssh" key.add(name, kind) elif arguments.init: """ key init """ config = Config() username = config["cloudmesh.profile.user"] if username == "TBD": Console.error( "Please set cloudmesh.profile.user in ~/.cloudmesh.yaml") u = os.environ["USER"].lower().replace(" ", "") Console.msg( f"To change it you can use the command. 
Define a NAME such as '{u}' e.g." ) Console.msg("") Console.msg(f" cms config set cloudmesh.profile.user={u}") Console.msg("") return "" key = Key() key.add(username, "ssh") variables['key'] = username elif arguments.upload: """ key upload [NAMES] [--cloud=CLOUDS] [--dryrun] key upload [NAMES] [VMS] [--dryrun] """ names = Parameter.expand(arguments.NAMES) # this may have a bug if NAMES is ommitted # # Step 0. Set keyname to variable # if names is None or len(names) == 0: config = Config() username = config["cloudmesh.profile.user"] names = [username] if len(names) == 1: name = names[0] variables = Variables() if "key" in variables: old = variables["key"] if old != name: Console.msg( f"Changing default key from {old} to {name}") variables["key"] = name # # Step 1. keys = find keys to upload # cloud = "local" db = CmDatabase() db_keys = db.find(collection=f"{cloud}-key") keys = [] for key in db_keys: if key["name"] in names: keys.append(key) if len(keys) == 0: Console.error( f"No keys with the names {names} found in cloudmesh. \n" " Use the command 'key add' to add the key.") # # Step 2. iterate over the clouds to upload # clouds, vmnames = Arguments.get_cloud_and_names( "list", arguments, variables) for cloud in clouds: print(f"cloud {cloud}") provider = Provider(name=cloud) for key in db_keys: name = key['name'] if name in names: try: r = provider.key_upload(key) Console.ok(f"upload key '{name} successful'. 
") except ValueError as e: Console.error( f"key '{name} already exists in {cloud}.") return "" elif arguments.delete and arguments.cloud and arguments.NAMES: # key delete NAMES --cloud=CLOUDS [--dryrun] names = Parameter.expand(arguments.NAMES) clouds = Parameter.expand(arguments.cloud) for cloud in clouds: provider = Provider(name=cloud) for name in names: if arguments.dryrun: Console.ok(f"Dryrun: delete {name} in {cloud}") else: images = provider.key_delete(name) return "" elif arguments.gen: """ key gen (ssh | pem) [--filename=FILENAME] [--nopass] [--set_path] [--force] Generate an RSA key pair with pem or ssh encoding for the public key. The private key is always encoded as a PEM file. """ config = Config() # Check if password will be requested ap = not arguments.nopass if not ap: Console.warning("Private key will NOT have a password") cnt = yn_choice(message="Continue, despite risk?", default="N") if not cnt: sys.exit() # Discern the name of the public and private keys rk_path = None uk_path = None if arguments.filename: fp = path_expand(arguments.filename) fname, fext = os.path.splitext(fp) if fext == ".pub" or fext == ".ssh": rk_path = fname uk_path = fp elif fext == ".priv" or fext == ".pem": rk_path = fp uk_path = fname + ".pub" else: rk_path = fp uk_path = rk_path + ".pub" else: rk_path = path_expand("~/.ssh/id_rsa") uk_path = rk_path + ".pub" # Check if the file exist, if so confirm overwrite def check_exists(path): if os.path.exists(path): Console.info(f"{path} already exists") ovwr_r = yn_choice(message=f"overwrite {path}?", default="N") if not ovwr_r: Console.info(f"Not overwriting {path}. 
Quitting") sys.exit() if not arguments.force: check_exists(rk_path) check_exists(uk_path) # Set the path if requested if arguments.set_path: config['cloudmesh.security.privatekey'] = rk_path config['cloudmesh.security.publickey'] = uk_path config.save() Console.msg(f"\nPrivate key: {rk_path}") Console.msg(f"Public key: {uk_path}\n") # Generate the Private and Public keys kh = KeyHandler() r = kh.new_rsa_key() u = kh.get_pub_key(priv=r) # Serialize and write the private key to the path sr = kh.serialize_key(key=r, key_type="PRIV", encoding="PEM", format="PKCS8", ask_pass=ap) # Force write the key (since we check file existence above) kh.write_key(key=sr, path=rk_path, force=True) # Determine the public key format and encoding enc = None forma = None if arguments.ssh: enc = "SSH" forma = "SSH" elif arguments.pem: enc = "PEM" forma = "SubjectInfo" # Serialize and write the public key to the path su = kh.serialize_key(key=u, key_type="PUB", encoding=enc, format=forma, ask_pass=False) # Force write the key (since we check file existence above) kh.write_key(key=su, path=uk_path, force=True) Console.ok("Success") elif arguments.verify: """ key verify (ssh | pem) [--filename=FILENAME] [--pub] [--check_pass] Verifies the encoding (pem or ssh) of the key (private or public) """ # Initialize variables kh = KeyHandler() # Determine filepath fp = None if arguments.filename is None: config = Config() kp = path_expand("~/.ssh/id_rsa") if arguments.pub: fp = kp + ".pub" else: fp = kp else: fp = arguments.filename # Discern key type kt = enc = None ap = True if arguments.pub: # Load the public key, if no error occurs formatting is correct kt, kta, ap = "public", "PUB", False # Discern public key encoding if arguments.ssh: enc, e = "OpenSSH", "SSH" elif arguments.pem: # PEM encoding enc = e = "PEM" else: # Load the private key to verify the format and password of the # key file. 
If no error occurs the format and pwd are correct kt, kta = "private", "PRIV" enc = e = "PEM" ap = False if arguments.check_pass: ap = True try: k = kh.load_key(path=fp, key_type=kta, encoding=e, ask_pass=ap) m = f"Success the {kt} key {fp} has proper {enc} format" Console.ok(m) except ValueError as e: # The formatting was incorrect m = f"Failure, {kt} key {fp} does not have proper {enc} format" Console.error(m) raise e except TypeError as e: # Success, we didn't ask the user for the key password and # we received an error for not entering the password, thus # the key is password protectd if not arguments.check_pass: Console.ok("The key is password protected") else: # Error Message handled in kh.load_key() raise e elif arguments.reformat: """ key reformat (ssh | pem) [--filename=FILENAME] [--format=FORMAT] [--nopass] [--pub] Restructures a key's format, encoding, and password """ # Initialize variables kh = KeyHandler() # Determine key type fname, fext = os.path.splitext(arguments.filename) kt = "PRIV" if arguments.pub or fext == ".pub": kt = "PUB" # Determine new encoding use_pem = True if arguments.ssh: use_pem = False kh.reformat_key(path=arguments.filename, key_type=kt, use_pem=use_pem, new_format=arguments.format, ask_pass=not arguments.nopass) elif arguments.delete and arguments.NAMES: # key delete NAMES [--dryrun] names = Parameter.expand(arguments.NAMES) cloud = "local" db = CmDatabase() db_keys = db.find(collection=f"{cloud}-key") error = [] for key in db_keys: name = key['name'] if name in names: if arguments.dryrun: Console.ok(f"Dryrun: delete {name}") else: db.delete(collection="local-key", name=name) Console.ok(f"delete {name}") return "" elif arguments.group: raise NotImplementedError return ""
def do_storage(self, args, arguments):
    """
    ::

      Usage:
            storage run
            storage clean
            storage monitor [--storage=SERVICES]
                            [--status=all | --status=STATUS]
                            [--output=output]
                            [--clear]
            storage create dir DIRECTORY [--storage=SERVICE] [--parallel=N] [--run]
            storage get SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N] [--run]
            storage put SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N] [--run]
            storage list [--storage=SERVICE] [SOURCE] [--recursive] [--parallel=N] [--output=OUTPUT] [--dryrun] [--run]
            storage delete SOURCE [--storage=SERVICE] [--parallel=N] [--dryrun] [--run]
            storage search DIRECTORY FILENAME [--recursive] [--storage=SERVICE] [--parallel=N] [--output=OUTPUT] [--run]
            storage sync SOURCE DESTINATION [--name=NAME] [--async] [--storage=SERVICE]
            storage sync status [--name=NAME] [--storage=SERVICE]
            storage config list [--output=OUTPUT]
            storage copy --source=SOURCE:SOURCE_FILE_DIR --target=TARGET:TARGET_FILE_DIR [--run]
            storage cc --source=SOURCE:SOURCE_FILE_DIR --target=TARGET:TARGET_FILE_DIR

      This command does some useful things.

      Arguments:
          SOURCE                  SOURCE can be a directory or file
          DESTINATION             DESTINATION can be a directory or file
          DIRECTORY               DIRECTORY refers to a folder on the cloud service
          SOURCE:SOURCE_FILE_DIR  source provider name: file or directory name
          TARGET:SOURCE_FILE_DIR  destination provider name

      Options:
          --storage=SERVICE  specify the cloud service name like aws or
                             azure or box or google

      Description:
            commands used to upload, download, list files on different
            cloud storage services.

            storage run
                Execute the actions in database that are in waiting
                status.

            > storage monitor [--storage=SERVICE]
            >                 [--status=all | --status=STATUS]
            >                 [--output=output]
            >                 [--clear]
                Monitor the actions in database and refresh every
                5 seconds.

            > storage put SOURCE DESTINATION [--recursive] [--storage=SERVICE]
            >             [--parallel=N]
                Uploads the file specified in the filename to specified
                cloud from the SOURCEDIR.

            > storage get SOURCE DESTINATION [--recursive] [--storage=SERVICE]
            >             [--parallel=N]
                Downloads the file specified in the filename from the
                specified cloud to the DESTDIR.

            storage delete SOURCE [--parallel=N] [--dryrun]
                Deletes the file specified in the filename from the
                specified cloud.

            > storage list [SOURCE] [--recursive] [--parallel=N]
            >              [--output=OUTPUT] [--dryrun]
                lists all the files from the container name specified on
                the specified cloud.

            storage create dir DIRECTORY [--storage=SERVICE] [--parallel=N]
                creates a folder with the directory name specified on the
                specified cloud.

            > storage search DIRECTORY FILENAME [--recursive]
            >                [--storage=SERVICE]
            >                [--parallel=N]
            >                [--output=OUTPUT]
                searches for the source in all the folders on the
                specified cloud.

            sync SOURCE DESTINATION
                puts the content of source to the destination.
                If --recursive is specified this is done recursively
                   from the source
                If --async is specified, this is done asynchronously
                If a name is specified, the process can also be
                   monitored with the status command by name.
                If the name is not specified all date is monitored.

            sync status
                The status for the asynchronous sync can be seen with
                this command

            config list
                Lists the configures storage services in the yaml file

            storage copy SOURCE DESTINATION
                Copies files from source storage to destination storage.
                The syntax of SOURCE and DESTINATION is:
                SOURCE - awss3:source.txt
                DESTINATION - azure:target.txt

      Description of the copy command:

            Command enables to Copy files between different cloud service
            providers, list and delete them. This command accepts `aws` ,
            `google` and `local` as the SOURCE and TARGET provider.

            cms storage copy --source=SERVICE:SOURCE --target=DEST:TARGET

                Command copies files or directories from Source provider
                to Target Provider.

            cms storage slist --source=SERVICE:SOURCE
                Command lists all the files present in SOURCE provider's
                in the given SOURCE_FILE_DIR location This command
                accepts `aws` or `google` as the SOURCE provider

            cms storage sdelete --source=SERVICE:SOURCE
                Command deletes the file or directory from the SOURCE
                provider's SOURCE_FILE_DIR location

      Examples:
            > cms storage_service copy --source=local:test1.txt
            >                          --target=aws:uploadtest1.txt
            cms storage_service list --source=google:test
            cms storage_service delete --source=aws:uploadtest1.txt

            cms storage put test_file1.txt aws_test_file1.txt
            cms storage put ./recur_dir recur_dir_aws/ --recursive
            cms storage put ./recur_dir recur_dir_aws/
            cms storage get aws_test_file1.txt aws_file1.txt
            cms storage get recur_dir_aws from_aws_dir
            cms storage get recur_dir_aws from_aws_dir --recursive
            cms storage list
            cms storage list --recursive
            cms storage list aws:recur_dir_aws --recursively
            cms storage delete aws:aws_test_file1.txt
            cms storage search recur_dir_aws recur_file1.txt

      Example:
          set storage=aws
          storage put SOURCE DESTINATION --recursive

          is the same as
          storage --storage=aws put SOURCE DESTINATION --recursive

          storage copy aws:source.txt oracle:target.txt
    """
    # NOTE: the docstring above is the docopt specification for this
    # command; changing the Usage/Options sections changes the parser.

    # arguments.CONTAINER = arguments["--container"]
    VERBOSE(arguments)
    # Promote the dashed docopt options to attribute access
    # (arguments["--dryrun"] -> arguments.dryrun, etc.)
    map_parameters(arguments,
                   "dryrun",
                   "recursive",
                   "storage",
                   "source",
                   "target",
                   "parallel")
    source = arguments.source
    target = arguments.target
    variables = Variables()
    # Degree of parallelism for the storage provider; defaults to 1
    parallelism = arguments.parallel or 1
    # --storage may be a parameterized list; fall back to the `storage`
    # variable from the cms variable store
    arguments.storage = Parameter.expand(arguments.storage
                                         or variables['storage'])
    # --run triggers immediate execution of queued actions after queuing
    run_immediately = arguments['--run']
    if arguments.monitor:
        provider = Provider(arguments.storage[0],
                            parallelism=parallelism)
        status = arguments['--status'] or "all"
        output = arguments['--output'] or "table"
        result = provider.monitor(status=status, output=output)
    elif arguments.clean:
        provider = Provider(arguments.storage[0],
                            parallelism=parallelism)
        result = provider.clean()
    elif arguments.run:
        provider = Provider(arguments.storage[0],
                            parallelism=parallelism)
        result = provider.run()
    elif arguments['get']:
        provider = Provider(arguments.storage[0],
                            parallelism=parallelism)
        result = provider.get(arguments.SOURCE,
                              arguments.DESTINATION,
                              arguments.recursive)
        if run_immediately:
            provider.run()
    elif arguments.put:
        provider = Provider(arguments.storage[0],
                            parallelism=parallelism)
        result = provider.put(arguments.SOURCE,
                              arguments.DESTINATION,
                              arguments.recursive)
        if run_immediately:
            provider.run()
    elif arguments.create and arguments.dir:
        provider = Provider(arguments.storage[0],
                            parallelism=parallelism)
        result = provider.create_dir(arguments.DIRECTORY)
        if run_immediately:
            provider.run()
    elif arguments.list:
        """
        storage list SOURCE [--parallel=N]
        """
        # Default to the configured storage service root, or local:/
        if variables['storage']:
            default_source = f"{variables['storage']}:/"
        else:
            default_source = "local:/"
        sources = arguments.SOURCE or default_source
        # A bare path without "service:" gets the default service prefix
        if not ":" in sources:
            sources = f"{variables['storage']}:{sources}"
        sources = Parameter.expand(sources)
        # Split every "service:path" into (service, path) pairs
        deletes = []
        for source in sources:
            storage, entry = Parameter.separate(source)
            storage = storage or source or "local"
            deletes.append((storage, entry))
        _sources = ', '.join(sources)
        for delete in deletes:
            service, entry = delete
            if arguments.dryrun:
                print(f"Dryrun: list {service}:{entry}")
            else:
                provider = Provider(service=service,
                                    parallelism=parallelism)
                provider.list(name=entry, recursive=arguments.recursive)
        # NOTE(review): if every entry was a dryrun, `provider` is
        # unbound here and --run would raise NameError -- confirm.
        if run_immediately:
            provider.run()
        return ""
    elif arguments.delete:
        """
        storage delete SOURCE [--parallel=N]
        """
        if variables['storage']:
            default_source = f"{variables['storage']}:/"
        else:
            default_source = "local:/"
        sources = arguments.SOURCE or default_source
        if not ":" in sources:
            sources = f"{variables['storage']}:{sources}"
        sources = Parameter.expand(sources)
        deletes = []
        for source in sources:
            storage, entry = Parameter.separate(source)
            storage = storage or source or "local"
            deletes.append((storage, entry))
        _sources = ', '.join(sources)
        # Interactive confirmation before any destructive action
        answer = yn_choice(f"Would you like to delete {_sources}?",
                           default="no")
        if answer:
            for delete in deletes:
                service, entry = delete
                if arguments.dryrun:
                    print(f"Dryrun: delete {service}:{entry}")
                else:
                    provider = Provider(service=service,
                                        parallelism=parallelism)
                    provider.delete(name=entry)
        else:
            Console.error("Deletion canceled")
        # NOTE(review): as in the list branch, `provider` may be unbound
        # here when nothing was deleted (dryrun or canceled) -- confirm.
        if run_immediately:
            provider.run()
        return ""
    elif arguments.search:
        for storage in arguments.storage:
            provider = Provider(storage,
                                parallelism=parallelism)
            provider.search(arguments.DIRECTORY,
                            arguments.FILENAME,
                            arguments.recursive)
        if run_immediately:
            provider.run()
    elif arguments.rsync:
        # TODO: implement
        raise NotImplementedError
    elif arguments['cc']:
        # "service:path" -> (service, path); the `or None` has no effect
        # since split() never returns a falsy value here
        scloud, sfileDir = source.split(":", 1) or None
        tcloud, tfileDir = target.split(":", 1) or None
        print(f" Copying from Source {scloud} : {sfileDir} to Target "
              f" {tcloud} : {tfileDir}")
        cloudName = ["aws", "google"]
        if scloud in cloudName:
            provider = Provider(service=scloud,
                                parallelism=parallelism)
            provider.copyFiles(scloud, sfileDir, tcloud, tfileDir)
        else:
            print("Not Implemented")
        return ""
    elif arguments.copy:
        scloud, sbucket = arguments['--source'].split(":", 1) or None
        tcloud, tbucket = arguments['--target'].split(":", 1) or None
        # aws/google sources use the provider-to-provider copy signature;
        # everything else copies via the target provider
        if scloud == "aws" or scloud == "google":
            provider = Provider(service=scloud,
                                parallelism=parallelism)
            provider.copy(scloud, tcloud, sbucket, tbucket)
        else:
            provider = Provider(service=tcloud,
                                parallelism=parallelism)
            provider.copy(arguments['--source'],
                          arguments['--target'],
                          arguments.recursive)
        if run_immediately:
            provider.run()
    return ""
def burn(
    self,
    name=None,
    device=None,
    verbose=False,
    password=None,
):
    """
    Burn the SD card in `device` with RaspberryOS for the named
    inventory config and configure it (hostname, hosts table, static
    ip, wifi, password, locale, ssh key) via the generated runfirst
    script.

    :param name: name of the config entry in self.configs (inventory)
    :type name: str
    :param device: device path to burn, e.g. /dev/sda
    :type device: str
    :param verbose: if True print the generated runfirst script
    :type verbose: bool
    :param password: optional password to set on the burned image
    :type password: str
    :return: "" on error or user break, None otherwise
    """
    # Guard clauses: both a device and a known inventory name are required
    if device is None:
        Console.error('Device not specified')
        return
    if name is None:
        Console.error('Name to burn is not specified')
        return
    if name not in self.configs:
        Console.error(
            f'Could not find {name} in Inventory. Is the service column marked as "manager" or "worker"?'
        )
        return
    config = self.configs[name]
    sdcard = SDCard(card_os="raspberry")
    try:
        USB.check_for_readers()
    except Exception as e:
        print()
        Console.error(e)
        print()
        return ""
    banner(f"Burn {name}", figlet=True)
    # Confirm card is inserted into device path; give the user a second
    # chance to insert it before aborting
    if not yn_choice(f'Is the card to be burned for {name} inserted?'):
        if not yn_choice(
                f"Please insert the card to be burned for {name}. "
                # bugfix: message typo "terminante" -> "terminate"
                "Type 'y' when done or 'n' to terminate"):
            Console.error("Terminating: User Break")
            return ""
    Console.info(f'Burning {name}')
    sdcard.format_device(device=device, yes=True)
    sdcard.unmount(device=device)
    sdcard.burn_sdcard(tag=config['tag'], device=device, yes=True)
    sdcard.mount(device=device, card_os="raspberry")

    # Read and write cmdline.txt
    cmdline = Cmdline()
    # Reading will create the proper script in the cmdline instance
    # No extra work needed
    cmdline.read(filename=f'{sdcard.boot_volume}/cmdline.txt')
    cmdline.write(filename=f'{sdcard.boot_volume}/cmdline.txt')
    # print(cmdline.get())

    # Build the proper runfirst.sh from the inventory config
    runfirst = Runfirst()
    runfirst.set_hostname(config['host'])
    other_hosts, other_ips = self._get_hosts_for(name=config['host'])
    runfirst.set_hosts(names=other_hosts, ips=other_ips)
    if config['ip']:
        # config['router'] and config['dns'] are allowed to be empty
        # String or None to skip its config.
        # Default column in inventory is empty string
        runfirst.set_static_ip(ip=config['ip'],
                               router=config['router'],
                               dns=config['dns'])
    if password:
        runfirst.set_password(password=password)
    runfirst.set_locale(timezone=config['timezone'],
                        locale=config['locale'])
    # Only configure wifi when an SSID is known and the node requests it
    if self.ssid and 'wifi' in config['services']:
        runfirst.set_wifi(self.ssid, self.wifipasswd, self.country)
    runfirst.set_key(key=readfile(config['keyfile']).strip())
    if 'bridge' in config['services']:
        runfirst.enable_bridge()
    runfirst.get(verbose=verbose)
    runfirst.write(filename=f'{sdcard.boot_volume}/{Runfirst.SCRIPT_NAME}')
    # Sleep for 1 seconds to give ample time for writing to finish
    time.sleep(1)
    sdcard.unmount(device=device, card_os="raspberry")
    Console.ok(f'Burned {name}')
    return
def _continue(msg):
    # Ask the user for confirmation with `msg` unless --dryrun is set.
    # NOTE(review): `arguments` is a free variable -- this helper only
    # works as a closure inside a command handler that defines it.
    # NOTE(review): the answer `c` is never used, so answering "n" does
    # not stop execution; presumably a sys.exit()/return on a negative
    # answer was intended -- confirm against the callers.
    if not arguments.dryrun:
        c = yn_choice(msg, default='y')
def do_init(self, args, arguments):
    """
    ::

      Usage:
        init [CLOUD] [--debug]
        init yaml

      Description:

        Initializes cloudmesh while using data from
        ~/.cloudmesh/cloudmesh.yaml.

        If no cloud is specified a number of local collections are
        created. If a cloud is specified it also uploads the
        information about images, flavors, vms. It also uploads the
        security groups defined by default to the cloud.

      Bug:

        cms init
        cms init

        On Windows you have to run the cms init command twice upon
        first installation
    """
    # An ssh key pair is a hard prerequisite for cloudmesh setup
    ssh_key = path_expand("~/.ssh/id_rsa.pub")
    if not os.path.exists(ssh_key):
        Console.error(f"The ssh key {ssh_key} does not exist.")
        print()
        Console.info(
            # bugfix: message typo "defaukt" -> "default"
            "cms init is a convenient program to set up cloudmesh"
            " with default values. Please make sure you use ssh-keygen"
            " to set up the keys.\n\n"
            " Additionally we recommend that you use. \n\n"
            "    cms test\n\n"
            " to identify other issues\n")
        return ""
    config = Config()
    # A real username must be configured before anything else works
    if config["cloudmesh.profile.user"] == "TBD":
        Console.info(
            # bugfix: message typos "defaukt" -> "default",
            # "aslo" -> "also"
            "cms init is a convenient program to set up cloudmesh"
            " with default values. Please make sure you use in your"
            " ~/.cloudmesh/yaml file a valid value for\n\n"
            "    cloudmesh.profile.user\n\n"
            " This name is also used as keyname in the cloud providers\n\n"
            " Additionally we recommend that you use. \n\n"
            "    cms test\n\n"
            " to identify other issues\n")
        return ""
    if arguments.CLOUD == "yaml":
        # `cms init yaml`: (re)fetch the cloudmesh.yaml from Github
        location = path_expand("~/.cloudmesh/cloudmesh.yaml")
        path = Path(location)
        if path.is_file():
            print()
            if yn_choice(
                    # bugfix: message typo "wnat" -> "want"
                    "The file ~/.cloudmesh/cloudmesh.yaml exists, do you want to overwrite it",
                    default='n'):
                config.fetch()
                print()
                Console.ok("File cloudmesh.yaml downloaded from Github")
            else:
                print()
                Console.warning("Download canceled")
                print()
    else:
        variables = Variables()
        # Reset MongoDB: either recreate it from scratch or, when it is
        # already running, just drop the cloudmesh database
        if config["cloudmesh.data.mongo.MODE"] != 'running':
            try:
                print("MongoDB stop")
                MongoDBController().stop()
            except:
                Console.ok("MongoDB is not running. ok")
            machine = platform.lower()
            location = path_expand(config[
                f'cloudmesh.data.mongo.MONGO_DOWNLOAD.{machine}.MONGO_PATH'])
            try:
                print("deleting:", location)
                shutil.rmtree(location)
                print("MongoDB folder deleted")
            except Exception as e:
                Console.error(f"Could not delete {location}")
                # On Windows the delete can fail while mongo shuts down;
                # ask the user to retry
                if platform == 'win32':
                    print(e)
                    Console.error("Please try to run cms init again ... ")
                    return ""
            print("MongoDB create")
            os.system("cms admin mongo create")
            os.system("cms admin mongo start")
        else:
            print("MongoDB is on \"running\" mode!")
            print("Dropping cloudmesh database...")
            cm_db = CmDatabase()
            cm_db.connect()
            cm_db.drop_database()
        user = config["cloudmesh.profile.user"]
        secgroup = "flask"
        print("Set key")
        if user == "TBD":
            Console.error(
                "the user is not set in the yaml file for cloudmesh.profile.user")
            sys.exit()
        variables["key"] = user
        Console.ok("Config Security Initialization")
        Shell.execute("cms", ["config", "secinit"])
        os.system("cms key add")
        os.system("cms sec load")
        # When a cloud is given, also refresh its inventory and upload
        # the key and default security group
        if arguments.CLOUD is not None:
            cloud = arguments.CLOUD
            variables['cloud'] = cloud
            os.system(f"cms key upload {user} --cloud={cloud}")
            os.system("cms flavor list --refresh")
            os.system("cms image list --refresh")
            os.system("cms vm list --refresh")
            os.system(f"cms sec group load {secgroup} --cloud={cloud}")
            os.system(f"cms set secgroup={secgroup}")
        if arguments.debug:
            variables['debug'] = True
            variables['timer'] = 'on'
            variables['trace'] = True
            variables['verbose'] = '10'
        print()
        print("Variables")
        print()
        for name in variables:
            value = variables[name]
            print(f"    {name}={value}")
def do_key(self, args, arguments):
    """
    ::

       Usage:
         key  -h | --help
         key list --cloud=CLOUDS [--output=OUTPUT]
         key list --source=ssh [--dir=DIR] [--output=OUTPUT]
         key list --source=git [--output=OUTPUT] [--username=USERNAME]
         key list [--output=OUTPUT]
         key init
         key add NAME --filename=FILENAME [--output=OUTPUT]
         key add [NAME] [--source=FILENAME]
         key add [NAME] [--source=git]
         key add [NAME] [--source=ssh]
         key delete NAMES [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
         key upload [NAMES] [VMS] [--dryrun]
         key group upload [NAMES] [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group add [--group=GROUPNAMES] [--cloud=CLOUDS] [--dryrun]
         key group add --file=FILENAME
         key group delete [--group=GROUPNAMES] [NAMES] [--dryrun]
         key group list [--group=GROUPNAMES] [--output=OUTPUT]
         key group export --group=GROUNAMES --filename=FILENAME
         key gen (rsa | ssh) [--filename=FILENAME] [--nopass] [--set_path]
         key verify (ssh | pem) --filename=FILENAME [--pub]

       Arguments:
         VMS        Parameterized list of virtual machines
         CLOUDS     The clouds
         NAME       The name of the key.
         SOURCE     db, ssh, all
         KEYNAME    The desired full path name to the key file
         OUTPUT     The format of the output (table, json, yaml)
         FILENAME   The filename with full path in which the key
                    is located

       Options:
          --dir=DIR              the directory with keys [default: ~/.ssh]
          --filename=FILENAME    the name and full path to the file
          --nopass               Flag indicating if the key has no password
          --output=OUTPUT        the format of the output [default: table]
          --pub                  Indicates that the public key is passed in
          --set_path             Sets the security key paths to KEYNAME
          --source=SOURCE        the source for the keys
          --username=USERNAME    the source for the keys [default: none]

       Description:

           Please note that some values are read from the cloudmesh.yaml
           file. One such value is cloudmesh.profile.user

           Managing public keys is an essential component of accessing
           virtual machines in the cloud. There are a number of sources
           where you can find public keys. This includes the ~/.ssh
           directory and for example github.

           Keys will be uploaded into cloudmesh database with the add
           command under the given NAME. If the name is not specified
           the name cloudmesh.profile.user is assumed.

           key add NAME  --source=ssh
               adds the default key in ~/.ssh/id_rsa.pub
           key add NAME --source=FILENAME
               adds the key specified by the filename with the given
               name
           key add NAME --git --username=username
               adds a named github key from a user with the given github
               username.

           key set
                adds the ~/.ssh/id_rsa.pub key with the name specified
                in cloudmesh.profile.user. It also sets the variable key
                to that user.

           Once the keys are uploaded to github, they can be listed
           To list these keys the following list functions are provided.

           key list --source=git  [--username=USERNAME]
              lists all keys in git for the specified user. If the
              name is not specified it is read from cloudmesh.yaml
           key list --source=ssh  [--dir=DIR] [--output=OUTPUT]
              lists all keys in the directory. If the directory is not
              specified the default will be ~/.ssh

           key list NAMES
              lists all keys in the named virtual machines.

           List command can use the [--output=OUTPUT] option

              list the keys loaded to cloudmesh in the given format:
              json, yaml, table. table is default.

              The NAME can be specified and if omitted the name
              cloudmesh.profile.user is assumed.

           To get keys from the cloudmesh database the following
           commands are available:

           key delete NAMES
               deletes the Named keys. This may also have an impact on
               groups

           key rename NAME NEW
               renames the key from NAME to NEW in the cloudmesh
               database.

           Group management of keys is an important concept in
           cloudmesh, allowing multiple users to be added to virtual
           machines while managing the keys associated with them.

           The keys must be uploaded to cloudmesh database with a name
           so they can be used in a group. The --dryrun option executes
           the command without uploading the information to the clouds.
           If no group name is specified the group name default is
           assumed. If no cloudnames are specified, all active clouds
           are assumed. active clouds can be set in the cloudmesh.yaml
           file.

           key group delete [GROUPNAMES] [NAMES] [--dryrun]
                deletes the named keys from the named groups.

           key group list [GROUPNAMES] [--output=OUTPUT]
                list the key names and details in the group.

           key group upload [GROUPNAMES] [CLOUDS] [--dryrun]
                uploads the named groups to the specified clouds.

           In some cases you may want to store the public keys in
           files. For this reason we support the following commands.

           key group add --group=GROUPNAME --file=FILENAME
                the command adds the keys to the given group. The keys
                are written in the files in yaml format.

           key group export --group=GROUNAMES --filename=FILENAME
                the command exports the keys to the given group. The
                keys are written in the files in yaml format.

           The yaml format is as follows:

           cloudmesh:
             keys:
               NAMEOFKEY:
                 name: NAMEOFKEY
                 key: ssh-rsa AAAA..... comment
                 group:
                 - GROUPNAME
                 ...

           If a key is included in multiple groups they will be added
           to the grouplist of the key
    """

    def print_keys(keys):
        # Render the key records in the format requested via --output
        print(
            Printer.write(
                keys,
                sort_keys=["name"],
                order=["name", "type", "fingerprint", "comment"],
                header=["Name", "Type", "Fingerprint", "Comment"],
                output=arguments.output))

    # Promote the dashed docopt options to attribute access
    map_parameters(arguments,
                   'cloud',
                   'dir',
                   'dryrun',
                   'filename',
                   'name',
                   'nopass',
                   'output',
                   'pub',
                   'pwd',
                   'set_path',
                   'source')
    variables = Variables()
    if arguments.list and arguments.source == "git":
        # key list --source=git: read the keys from the github profile
        config = Config()
        username = config["cloudmesh.profile.github"]
        keys = SSHkey().get_from_git(username)
        print_keys(keys)
        return ""
    elif arguments.list and arguments.source == "ssh":
        # this is much simpler
        sshkey = SSHkey()
        print_keys([sshkey])
        return ""
    elif arguments.list and arguments.cloud:
        clouds = Parameter.expand(arguments.cloud)
        if len(clouds) == 0:
            # no cloud given: fall back to the `cloud` variable
            variables = Variables()
            cloudname = variables['cloud']
            clouds = [cloudname]
        keys = []
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            keys = provider.keys()
            provider.Print(keys, output=arguments.output, kind="key")
        return ""
    elif arguments.list:
        # default: list the keys stored in the local cloudmesh database
        cloud = "local"
        db = CmDatabase()
        keys = db.find(collection=f"{cloud}-key")
        print_keys(keys)
        return ""
    elif arguments.add:
        """
        key add [NAME] [--source=FILENAME]  # NOT IMPLEMENTED YET
        key add [NAME] [--source=git]
        key add [NAME] [--source=ssh]
        """
        key = Key()
        if arguments["--source"] == "ssh":
            name = arguments.NAME or "ssh"
            key.add(name, "ssh")
        elif arguments["--source"] == "git":
            name = arguments.NAME or "git"
            # bugfix: the computed name was previously ignored and the
            # key was always stored under "git"
            key.add(name, "git")
        else:
            config = Config()
            name = config["cloudmesh.profile.user"]
            kind = "ssh"
            key.add(name, kind)
    elif arguments.init:
        """
        key init
        """
        config = Config()
        username = config["cloudmesh.profile.user"]
        if username == "TBD":
            Console.error(
                "Please set cloudmesh.profile.user in ~/.cloudmesh.yaml")
            # suggest a plausible username derived from $USER
            u = os.environ["USER"].lower().replace(" ", "")
            Console.msg(
                f"To change it you can use the command. Define a NAME such as '{u}' e.g."
            )
            Console.msg("")
            Console.msg(f"    cms config set cloudmesh.profile.user={u}")
            Console.msg("")
            return ""
        key = Key()
        key.add(username, "ssh")
        variables['key'] = username
    elif arguments.upload:
        """
        key upload [NAMES] [--cloud=CLOUDS] [--dryrun]
        key upload [NAMES] [VMS] [--dryrun]
        """
        names = Parameter.expand(arguments.NAMES)
        # this may have a bug if NAMES is omitted
        #
        # Step 0. Set keyname to variable
        #
        if names is None or len(names) == 0:
            config = Config()
            username = config["cloudmesh.profile.user"]
            names = [username]
        if len(names) == 1:
            name = names[0]
            variables = Variables()
            if "key" in variables:
                old = variables["key"]
                if old != name:
                    Console.msg(
                        # bugfix: message typo "defualt" -> "default"
                        f"Changing default key from {old} to {name}")
                    variables["key"] = name
        #
        # Step 1. keys = find keys to upload
        #
        cloud = "local"
        db = CmDatabase()
        db_keys = db.find(collection=f"{cloud}-key")
        keys = []
        for key in db_keys:
            if key["name"] in names:
                keys.append(key)
        if len(keys) == 0:
            Console.error(
                f"No keys with the names {names} found in cloudmesh. \n"
                "       Use the command 'key add' to add the key.")
        #
        # Step 2. iterate over the clouds to upload
        #
        # bugfix: bind the VM names to their own variable; previously
        # this assignment clobbered `names` so the key filter below
        # tested against the wrong list
        clouds, vmnames = Arguments.get_cloud_and_names(
            "list", arguments, variables)
        for cloud in clouds:
            print(f"cloud {cloud}")
            provider = Provider(name=cloud)
            for key in db_keys:
                name = key['name']
                if name in names:
                    try:
                        r = provider.key_upload(key)
                        Console.ok(f"upload key '{name} successful'. ")
                    except ValueError as e:
                        Console.error(
                            f"key '{name} already exists in {cloud}.")
        return ""
    elif arguments.delete and arguments.cloud and arguments.NAMES:
        # key delete NAMES --cloud=CLOUDS [--dryrun]
        names = Parameter.expand(arguments.NAMES)
        clouds = Parameter.expand(arguments.cloud)
        for cloud in clouds:
            provider = Provider(name=cloud)
            for name in names:
                if arguments.dryrun:
                    Console.ok(f"Dryrun: delete {name} in {cloud}")
                else:
                    images = provider.key_delete(name)
        return ""
    elif arguments.gen:
        """
        key gen (rsa | ssh) [--filename=FILENAME] [--nopass] [--set_path]

        Generate an RSA key pair with pem or ssh encoding for the
        public key. The private key is always encoded as a PEM file.
        """
        config = Config()
        # Check if password will be requested
        ap = not arguments.nopass
        if not ap:
            Console.warning("Private key will NOT have a password")
            cnt = yn_choice(message="Continue, despite risk?", default="N")
            if not cnt:
                sys.exit()
        # Discern the name of the public and private keys
        rk_path = None
        uk_path = None
        if arguments.filename:
            # bugfix: derive both paths from --filename by stripping the
            # extension; the original read arguments.name and sliced from
            # the wrong end ([-4:] keeps the extension instead of
            # removing it)
            if arguments.filename[-4:] == ".pub":
                rk_path = path_expand(arguments.filename[:-4])
                uk_path = path_expand(arguments.filename)
            elif arguments.filename[-5:] == ".priv":
                rk_path = path_expand(arguments.filename)
                uk_path = path_expand(arguments.filename[:-5] + ".pub")
            else:
                rk_path = path_expand(arguments.filename)
                uk_path = rk_path + ".pub"
        else:
            rk_path = path_expand(config['cloudmesh.security.privatekey'])
            uk_path = path_expand(config['cloudmesh.security.publickey'])
        # Set the path if requested
        if arguments.set_path and arguments.filename:
            config['cloudmesh.security.privatekey'] = rk_path
            config['cloudmesh.security.publickey'] = uk_path
            config.save()
        Console.msg(f"\nPrivate key: {rk_path}")
        Console.msg(f"Public key: {uk_path}\n")
        # Generate the Private and Public keys
        kh = KeyHandler()
        r = kh.new_rsa_key()
        u = kh.get_pub_key(priv=r)
        # Serialize and write the private key to the path
        sr = kh.serialize_key(key=r,
                              key_type="PRIV",
                              encoding="PEM",
                              format="PKCS8",
                              ask_pass=ap)
        kh.write_key(key=sr, path=rk_path)
        # Determine the public key format and encoding
        enc = None
        forma = None
        if arguments.ssh:
            enc = "SSH"
            forma = "SSH"
        elif arguments.rsa:
            enc = "PEM"
            forma = "SubjectInfo"
        # Serialize and write the public key to the path
        su = kh.serialize_key(key=u,
                              key_type="PUB",
                              encoding=enc,
                              format=forma,
                              ask_pass=False)
        kh.write_key(key=su, path=uk_path)
        Console.ok("Success")
    elif arguments.verify:
        """
        key verify (ssh | pem) --filename=FILENAME --pub

        Verifies the encoding (pem or ssh) of the key (private or
        public)
        """
        kh = KeyHandler()
        fp = arguments.filename
        kt = None
        enc = None
        # Discern key type
        if arguments.pub:
            kt = "public"
            # Discern public key encoding
            if arguments.ssh:
                enc, e = "OpenSSH", "SSH"
            elif arguments.pem:  # PEM encoding
                enc = e = "PEM"
            # Load the public key, if no error occurs formatting is
            # correct
            u = kh.load_key(path=fp, key_type="PUB",
                            encoding=e, ask_pass=False)
        else:
            kt, enc = "private", "PEM"
            # Load the private key to verify the formatting and password
            # of the key file. If no error occurs the format and pwd are
            # correct
            r = kh.load_key(path=fp, key_type="PRIV",
                            encoding=enc, ask_pass=True)
        m = f"Success the {kt} key {fp} has proper {enc} format"
        Console.ok(m)
    elif arguments.delete and arguments.NAMES:
        # key delete NAMES [--dryrun]
        names = Parameter.expand(arguments.NAMES)
        cloud = "local"
        db = CmDatabase()
        db_keys = db.find(collection=f"{cloud}-key")
        error = []
        for key in db_keys:
            name = key['name']
            if name in names:
                if arguments.dryrun:
                    Console.ok(f"Dryrun: delete {name}")
                else:
                    db.delete(collection="local-key", name=name)
                    Console.ok(f"delete {name}")
        return ""
    elif arguments.group:
        raise NotImplementedError
    return ""
def burn_sdcard(self, image=None, tag=None, device=None, blocksize="4M",
                name="the inserted card", yes=False):
    """
    Burns the SD Card with an image.

    Exactly one of `image` or `tag` must be given: `image` is a direct
    path to an image file (used by copy), while `tag` selects an image
    from the local image registry (used by sdcard).

    :param image: path of the image file to burn (used by copy)
    :type image: str
    :param name: human-readable label for the card, used in messages only
    :type name: str
    :param tag: tag used to look the image up via Image().find (used by sdcard)
    :type tag: str
    :param device: Device to burn to, e.g. /dev/sda
    :type device: str
    :param blocksize: the blocksize used when writing, default 4M
    :type blocksize: str
    :param yes: when True, skip the interactive confirmation prompt
    :type yes: bool
    """
    # image and tag are mutually exclusive selectors
    if image and tag:
        Console.error("Implementation error, burn_sdcard can't have image "
                      "and tag.")
        return ""
    Console.info(f"Burning {name} ...")
    if image is not None:
        # caller supplied the image file directly
        image_path = image
    else:
        # resolve the image through the registry; require exactly one match
        image = Image().find(tag=tag)
        if image is None:
            Console.error("No matching image found.")
            return ""
        elif len(image) > 1:
            Console.error("Too many images found")
            print(Printer.write(image,
                                order=["tag", "version"],
                                header=["Tag", "Version"]))
            return ""
        image = image[0]
        # ubuntu images are stored as .xz archives which the downloader
        # unpacks in place; other images get an .img suffix appended
        if "ubuntu" in image["url"]:
            _name = os.path.basename(Image.get_name(image["url"]))
            _name = _name.replace(".xz", "")
        else:
            _name = os.path.basename(Image.get_name(image["url"])) + ".img"
        image_path = Image().directory + "/" + _name
        print(image_path)
        if not os.path.isfile(image_path):
            # assumes tag is an iterable of tag strings here -- TODO confirm
            tags = ' '.join(tag)
            print()
            Console.error(f"Image with tags '{tags}' not found. To download use")
            print()
            Console.msg(f"cms burn image get {tags}")
            print()
            return ""
    # compute the byte size for tqdm's progress total from the humanized
    # size string (decimal units, matching humanize.naturalsize)
    orig_size = size = humanize.naturalsize(os.path.getsize(image_path))
    # size = details[0]['size']
    n, unit = size.split(" ")
    unit = unit.replace("GB", "G")
    unit = unit.replace("MB", "M")
    n = float(n)
    if unit == "G":
        n = n * 1000**3
    elif unit == "M":
        n = n * 1000**2
    size = int(n)
    banner(f"Preparing the SDCard {name}")
    print(f"Name:       {name}")
    print(f"Image:      {image_path}")
    print(f"Image Size: {orig_size}")
    print(f"Device:     {device}")
    print(f"Blocksize:  {blocksize}")
    # macOS dd expects a lowercase blocksize suffix (e.g. 4m, not 4M)
    if os_is_mac():
        blocksize = blocksize.lower()
    print()
    Sudo.password()
    if device is None:
        Console.error("Please specify a device")
        return
    #
    # speedup burn for MacOS: the raw /dev/rdisk device bypasses the
    # buffer cache and is much faster than /dev/disk
    #
    if device.startswith("/dev/disk"):
        device = device.replace("/dev/disk", "/dev/rdisk")
    if os_is_mac():
        details = USB.get_from_diskutil()
        USB.print_details(details)
    # last chance to abort before the destructive write
    if not (yes or yn_choice(f"\nDo you like to write {name} on {device} "
                             f"with the image {image_path}")):
        return ""
    # TODO Gregor verify this is ok commenting out this line
    # self.mount(device=device)
    # pipe dd through tqdm to show a progress bar while writing
    if os_is_mac():
        command = f"sudo dd if={image_path} bs={blocksize} |" \
                  f' tqdm --bytes --total {size} --ncols 80 |' \
                  f" sudo dd of={device} bs={blocksize}"
    else:
        # command = f"sudo dd if={image_path} of={device} bs={blocksize} status=progress conv=fsync"
        command = f"sudo dd if={image_path} bs={blocksize} oflag=direct |" \
                  f' tqdm --bytes --total {size} --ncols 80 |' \
                  f" sudo dd of={device} bs={blocksize} iflag=fullblock " \
                  f"oflag=direct conv=fsync"
    print(command)
    os.system(command)
    # flush pending writes before unmounting so the card can be removed
    Sudo.execute("sync")
    if os_is_linux():
        self.unmount(device=device, full=True)
    else:
        self.unmount(device=device)
def do_init(self, args, arguments):
    """
    ::

        Usage:
            init [CLOUD] [--debug]
            init yaml

        Description:

            Initializes cloudmesh while using data from
            ~/.cloudmesh/cloudmesh.yaml.

            If no cloud is specified a number of local collections are
            created. If a cloud is specified it also uploads the
            information about images, flavors, vms. It also uploads the
            security groups defined by default to the cloud.

        Bug:

            cms init
            cms init

            On Windows you have to run the cms init command twice upon
            first installation
    """
    if arguments.CLOUD == "yaml":
        # only (re)fetch the configuration file; no database reset
        config = Config()
        location = path_expand("~/.cloudmesh/cloudmesh.yaml")
        path = Path(location)
        if path.is_file():
            print()
            # BUG FIX: prompt previously read "do you wnat to overwrite it"
            if yn_choice(
                    "The file ~/.cloudmesh/cloudmesh.yaml exists, "
                    "do you want to overwrite it",
                    default='n'):
                config.fetch()
                print()
                Console.ok("File cloudmesh.yaml downloaded from Github")
            else:
                print()
                Console.warning("Download canceled")
                print()
    else:
        # full reinitialization: stop MongoDB, wipe its folder, recreate
        # it, and reload the security data
        variables = Variables()
        config = Config()
        try:
            print("MongoDB stop")
            MongoDBController().stop()
        # narrowed from a bare except: which also swallowed
        # SystemExit/KeyboardInterrupt
        except Exception:
            Console.ok("MongoDB is not running. ok")
        machine = platform.lower()
        location = path_expand(config[
            f'cloudmesh.data.mongo.MONGO_DOWNLOAD.{machine}.MONGO_PATH'])
        try:
            shutil.rmtree(location)
            print("MongoDB folder deleted")
        except Exception:
            Console.error(f"Could not delete {location}")
            # on Windows the mongo files may still be locked; a second
            # run usually succeeds (see Bug section above)
            if platform == 'win32':
                Console.error("Please try to run cms init again ... ")
                sys.exit(1)
        config = Config()
        user = config["cloudmesh.profile.user"]
        secgroup = "flask"
        print("Set key")
        if user == "TBD":
            Console.error(
                "the user is not set in the yaml file for "
                "cloudmesh.profile.user")
            sys.exit()
        variables["key"] = user
        Console.ok("Config Security Initialization")
        Shell.execute("cms", ["config", "secinit"])
        print("MongoDB create")
        os.system("cms admin mongo create")
        os.system("cms admin mongo start")
        os.system("cms sec load")
        # if a cloud was named, also refresh its inventory and upload the
        # key and default security group
        if arguments.CLOUD is not None:
            cloud = arguments.CLOUD
            variables['cloud'] = cloud
            os.system(f"cms key upload {user} --cloud={cloud}")
            os.system("cms flavor list --refresh")
            os.system("cms image list --refresh")
            os.system("cms vm list --refresh")
            os.system(f"cms sec group load {secgroup} --cloud={cloud}")
            os.system(f"cms set secgroup={secgroup}")
        if arguments.debug:
            variables['debug'] = True
            variables['timer'] = 'on'
            variables['trace'] = True
            variables['verbose'] = '10'
        print()
        print("Variables")
        print()
        for name in variables:
            value = variables[name]
            print(f"    {name}={value}")
def burn_all(self, image="latest", device="dev/sda", blocksize="4M",
             progress=True, hostnames=None, ips=None, key=None):
    """
    Burns one SD Card per detected card writer, prompting the user to
    swap cards between burns.

    :param image: image selector passed through to self.burn
    :param device: unused; every device reported by Burner().info() is
                   burned in turn. NOTE(review): the default looks like a
                   typo for "/dev/sda", but the value is overwritten below
                   so it has no effect.
    :param blocksize: blocksize passed through to self.burn
    :param progress: progress flag passed through to self.burn
    :param hostnames: list of hostnames, one per device to burn
    :param ips: list of ips, one per device to burn
    :param key: key passed through to self.burn
    :return: None
    """
    # dict of {device_name: empty_status} built by probing each reader
    devices = {}
    info_statuses = Burner().info()
    for dev_name in info_statuses.keys():
        devices[dev_name] = info_statuses[dev_name]['empty']

    # report any non-empty cards so the user knows what would be clobbered
    device_statuses = devices.values()
    if False in device_statuses:
        print("\nEmpty status of devices:")
        for dev, empty_status in devices.items():
            x = "" if empty_status else "not "
            print(f"Device {dev} is {x}empty")
    print()

    # ask whether non-empty cards may be burned too; if not, drop them
    burn_all = yn_choice("Burn non-empty devices too?")
    if not burn_all:
        # collect first -- a dict can't be mutated while iterating it
        devices_to_delete = [dev for dev in devices if not devices[dev]]
        for dev in devices_to_delete:
            del devices[dev]

    print("Burning these devices:")
    print(' '.join(devices.keys()))

    # NOTE(review): hostnames/ips are indexed positionally per device and
    # raise TypeError when left as None -- verify against callers
    keys = list(devices.keys())
    for i, dev in enumerate(keys):
        hostname = hostnames[i]
        ip = ips[i]
        self.burn(image, dev, blocksize, progress, hostname, ip, key)
        os.system('tput bel')  # ring the terminal bell to notify user
        print()
        if i < len(keys) - 1:
            input('Insert next card and press enter...')
            print('Burning next card...')
            print()
    # BUG FIX: the original counted via a manual `i += 1` inside the for
    # loop, which raised NameError on the final print when no devices
    # were detected; len(keys) is the number of cards actually burned
    print(f"You burned {len(keys)} SD Cards")
    print("Done.")
elif len(details) > 1: Console.error( "For security reasons, please only put one USB writer in") sys.exit(1) else: device = "/dev/sdb" user = os.environ["USER"] os.system("cms burn info") banner(f"IS THE DEVICE CORRECTLY SET AS {device}?", c="#") if not yn_choice(f"Is the device correctly set as {device}"): sys.exit(1) if not (os_is_linux() or os_is_pi() or os_is_mac()): Console.error("OS is not Linux or Pi, test can not be performed") sys.exit(1) os.system("cms burn unmount") os.system("cms burn info") print() Console.warning( "If you see mount points above, please stop, unmount, and try again.") print() if not yn_choice(f"This test will be performed with the user '{user}' on " f"{device}. Select 'n' to input custom devive. Continue with "
def do_storage(self, args, arguments):
    """
    ::

      Usage:
            storage create dir DIRECTORY [--storage=SERVICE] [--parallel=N]
            storage get SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N]
            storage put SOURCE DESTINATION [--recursive] [--storage=SERVICE] [--parallel=N]
            storage list [SOURCE] [--recursive] [--parallel=N] [--output=OUTPUT] [--dryrun]
            storage delete SOURCE [--parallel=N] [--dryrun]
            storage search DIRECTORY FILENAME [--recursive] [--storage=SERVICE] [--parallel=N] [--output=OUTPUT]
            storage sync SOURCE DESTINATION [--name=NAME] [--async] [--storage=SERVICE]
            storage sync status [--name=NAME] [--storage=SERVICE]
            storage config list [--output=OUTPUT]
            storage [--parallel=N] copy SOURCE DESTINATION [--recursive]
            storage copy --source=SOURCE:SOURCE_FILE_DIR --target=TARGET:TARGET_FILE_DIR

      This command does some useful things.

      Arguments:
            SOURCE        SOURCE can be a directory or file
            DESTINATION   DESTINATION can be a directory or file
            DIRECTORY     DIRECTORY refers to a folder on the cloud service
            SOURCE:SOURCE_FILE_DIR   source provider name: file or directory name
            TARGET:SOURCE_FILE_DIR   destination provider name

      Options:
            --storage=SERVICE  specify the cloud service name like aws or
                               azure or box or google

      Description:
            commands used to upload, download, list files on different
            cloud storage services.

            storage put [options..]
                Uploads the file specified in the filename to specified
                cloud from the SOURCEDIR.

            storage get [options..]
                Downloads the file specified in the filename from the
                specified cloud to the DESTDIR.

            storage delete [options..]
                Deletes the file specified in the filename from the
                specified cloud.

            storage list [options..]
                lists all the files from the container name specified on
                the specified cloud.

            storage create dir [options..]
                creates a folder with the directory name specified on the
                specified cloud.

            storage search [options..]
                searches for the source in all the folders on the
                specified cloud.

            sync SOURCE DESTINATION
                puts the content of source to the destination.
                If --recursive is specified this is done recursively from
                the source.
                If --async is specified, this is done asynchronously.
                If a name is specified, the process can also be monitored
                with the status command by name.
                If the name is not specified all date is monitored.

            sync status
                The status for the asynchronous sync can be seen with this
                command

            config list
                Lists the configures storage services in the yaml file

            storage copy SOURCE DESTINATION
                Copies files from source storage to destination storage.
                The syntax of SOURCE and DESTINATION is:
                SOURCE - awss3:source.txt
                DESTINATION - azure:target.txt

            Description of the copy command:

                Command enables to Copy files between different cloud
                service providers, list and delete them. This command
                accepts `aws`, `google` and `local` as the SOURCE and
                TARGET provider.

                cms storage copy --source=SERVICE:SOURCE --target=DEST:TARGET
                    Command copies files or directories from Source
                    provider to Target Provider.

                cms storage slist --source=SERVICE:SOURCE
                    Command lists all the files present in SOURCE
                    provider's in the given SOURCE_FILE_DIR location.
                    This command accepts `aws` or `google` as the SOURCE
                    provider.

                cms storage sdelete --source=SERVICE:SOURCE
                    Command deletes the file or directory from the SOURCE
                    provider's SOURCE_FILE_DIR location.

            Examples:
                cms storage_service copy --source=local:test1.txt --target=aws:uploadtest1.txt
                cms storage_service list --source=google:test
                cms storage_service delete --source=aws:uploadtest1.txt

            Example:
                set storage=azureblob
                storage put SOURCE DESTINATION --recursive

                is the same as

                storage --storage=azureblob put SOURCE DESTINATION --recursive

                storage copy azure:source.txt oracle:target.txt
    """
    # arguments.CONTAINER = arguments["--container"]
    VERBOSE(arguments)
    map_parameters(arguments,
                   "dryrun",
                   "recursive",
                   "storage",
                   "source",
                   "target")
    source = arguments.source
    target = arguments.target
    variables = Variables()
    VERBOSE(arguments)
    arguments.storage = Parameter.expand(arguments.storage)

    if arguments["get"]:
        provider = Provider(arguments.storage[0])
        result = provider.get(arguments.SOURCE,
                              arguments.DESTINATION,
                              arguments.recursive)

    elif arguments.put:
        provider = Provider(arguments.storage[0])
        result = provider.put(arguments.SOURCE,
                              arguments.DESTINATION,
                              arguments.recursive)

    elif arguments.create and arguments.dir:
        provider = Provider(arguments.storage[0])
        result = provider.create_dir(arguments.DIRECTORY)

    elif arguments.list:
        # storage list SOURCE [--parallel=N]
        # SOURCE may be a parameterized list of service:entry pairs;
        # fall back to the storage variable, then the local cwd
        sources = arguments.SOURCE or variables["storage"] or 'local:.'
        sources = Parameter.expand(sources)
        targets = []
        for entry_source in sources:
            storage, entry = Parameter.separate(entry_source)
            storage = storage or "local"
            targets.append((storage, entry))
        for service, entry in targets:
            if arguments.dryrun:
                print(f"Dryrun: list {service}:{entry}")
            else:
                provider = Provider(service=service)
                provider.list(name=entry)
        return ""

    elif arguments.delete:
        # storage delete SOURCE [--parallel=N]
        sources = arguments.SOURCE or variables["storage"] or 'local:.'
        sources = Parameter.expand(sources)
        deletes = []
        for entry_source in sources:
            storage, entry = Parameter.separate(entry_source)
            storage = storage or "local"
            deletes.append((storage, entry))
        _sources = ', '.join(sources)
        # destructive operation: confirm before deleting anything
        answer = yn_choice(f"Would you like to delete {_sources}?",
                           default="no")
        if answer:
            for service, entry in deletes:
                if arguments.dryrun:
                    print(f"Dryrun: delete {service}:{entry}")
                else:
                    provider = Provider(service=service)
                    provider.delete(name=entry)
        else:
            Console.error("Deletion canceled")
        return ""

    elif arguments.search:
        for storage in arguments.storage:
            provider = Provider(storage)
            provider.search(arguments.DIRECTORY,
                            arguments.FILENAME,
                            arguments.recursive)

    elif arguments.rsync:
        # TODO: implement
        raise NotImplementedError

    elif arguments.copy:
        # BUG FIX: the original had two `elif arguments.copy:` branches;
        # the second (the --source/--target form from the Usage section)
        # was unreachable dead code. Dispatch on whether the named
        # options were supplied instead.
        if source and target:
            # storage copy --source=SERVICE:SOURCE --target=DEST:TARGET
            scloud, sbucket = source.split(":", 1)
            tcloud, tbucket = target.split(":", 1)
            if scloud == "aws" or scloud == "google":
                provider = Provider(service=scloud)
                provider.copy(scloud, tcloud, sbucket, tbucket)
            elif (scloud == "local" and tcloud == "aws") or \
                    (scloud == "local" and tcloud == "google"):
                provider = Provider(service=tcloud)
                provider.copy(scloud, tcloud, sbucket, tbucket)
            else:
                print("Not Implemented")
        else:
            # storage [--parallel=N] copy SOURCE DESTINATION [--recursive]
            VERBOSE(f"COPY: Executing Copy command from {arguments.SOURCE} to "
                    f"{arguments.DESTINATION} providers")
            print(f"DEBUG storage.py: INITIALIZE with {arguments.storage[0]} "
                  "provider.")
            provider = Provider(arguments.storage[0])
            result = provider.copy(arguments.SOURCE,
                                   arguments.DESTINATION,
                                   arguments.recursive)

    return ""