def _ensure_aws_keypair(username, ssh_key_path, region):
    """Make sure an EC2 key pair named after *username* exists in *region*.

    Looks the key pair up first; when the lookup fails, imports the local
    public key (``{ssh_key_path}.pub``) under that name.  Returns the
    completed process of the last aws command run.
    """
    result = run(
        f'aws ec2 describe-key-pairs --key-name {username} --region {region}')
    if result.returncode == 0:
        return result
    return run(
        f'aws ec2 import-key-pair --key-name {username} --public-key-material file://{ssh_key_path}.pub --region {region}'
    )
def delete_image(image):
    """Delete every DigitalOcean snapshot whose name equals *image*.

    Raises:
        DeleteImageException: when deleting a matching snapshot fails.
    """
    p = run('doctl compute snapshot list --output json')
    # BUG FIX: p.stdout is a string, so json.loads must be used —
    # json.load expects a file-like object and raises AttributeError here.
    snapshots = json.loads(p.stdout)
    for s in snapshots:
        if s["name"] == image:
            p = run(f'doctl compute snapshot delete {s["id"]}')
            if p.returncode != 0:
                raise DeleteImageException(p.stderr)
    return
def _digitalocean_ssh_key_fingerprint(username, ssh_key_path):
    """Return the MD5 fingerprint of the local public key, registering the
    key with DigitalOcean under *username* when it is not known yet."""
    p = run(f'ssh-keygen -E md5 -lf {ssh_key_path}.pub')
    # ssh-keygen prints "<bits> MD5:xx:yy:... comment"; drop the "MD5:" prefix.
    fingerprint = p.stdout.split(' ')[1][4:]
    p = run(f'doctl compute ssh-key get {fingerprint}')
    if p.returncode != 0:
        # BUG FIX: the original passed the literal word "username" to doctl
        # instead of interpolating the parameter value.
        p = run(
            f'doctl compute ssh-key import {username} --public-key-file {ssh_key_path}.pub'
        )
    return fingerprint
def _generate_gcloud_ssh_key():
    """Create the local ssh key if it is missing and upload it via OS Login.

    WARNING: the generated key has an empty passphrase.
    """
    if not os.path.exists(SSH_KEY_PATH):
        print("There is no key at {}, creating new key.".format(SSH_KEY_PATH))
        # Generate new ssh key to log in.
        # BUG FIX: run() receives an argument list (no shell), so '""' set
        # the passphrase to the literal two-character string "".  Pass a
        # truly empty string for an empty passphrase.
        run(["ssh-keygen", "-f", SSH_KEY_PATH, "-t", "rsa", "-N", ""])
        # Upload the freshly created public key so OS Login accepts it.
        run([
            "gcloud", "compute", "os-login", "ssh-keys", "add",
            "--key-file={}.pub".format(SSH_KEY_PATH)
        ])
def _get_username(project):
    """Return the ssh username to use for *project*.

    When the project metadata enables OS Login, the username from the
    gcloud OS Login profile is used; otherwise the local login name.
    """
    describe = run(f'gcloud compute project-info describe --project {project}')
    metadata_items = yaml.safe_load(
        describe.stdout)['commonInstanceMetadata']['items']
    oslogin_enabled = any(
        item['key'] == 'enable-oslogin' and item['value'] == 'TRUE'
        for item in metadata_items)
    if not oslogin_enabled:
        return os.getlogin()
    profile = run(['gcloud', 'compute', 'os-login', 'describe-profile'])
    return yaml.safe_load(profile.stdout)['posixAccounts'][0]['username']
def ensure_user(self, username, *, sudo=True, pubkey=True):
    """Ensure *username* exists on the remote machine, optionally with
    passwordless sudo and the local public key in authorized_keys.

    Args:
        username: account to create via ``useradd`` ('root' reuses
            /root/.ssh instead of /home/<user>/.ssh).
        sudo: when True, drop a NOPASSWD sudoers entry for the user.
        pubkey: True derives the public key from ``self.ssh_key_path``
            with ``ssh-keygen -y``; a string installs that key verbatim;
            a falsy value skips key installation.

    Returns:
        The completed process object returned by ``self.sudo``.
    """
    add_sudo = ''
    if sudo:
        add_sudo = f'echo "{username} ALL=(ALL) NOPASSWD:ALL" > /etc/sudoers.d/{username}'
    add_pubkey = ''
    if username == 'root':
        ssh_path = '/root/.ssh'
    else:
        ssh_path = f'/home/{username}/.ssh'
    if pubkey is True:
        # Recover the public key from the private key file on disk.
        pubkey = run(
            f'ssh-keygen -y -f {self.ssh_key_path}').stdout.strip()
    if pubkey:
        # Append the key only when not already present (grep -Fxq: fixed
        # string, whole-line, quiet).
        add_pubkey = f'''sudo -u {username} mkdir -p {ssh_path}
if ! $(grep -Fxq '{pubkey}' {ssh_path}/authorized_keys); then
sudo -u {username} echo "{pubkey}" >> {ssh_path}/authorized_keys
fi
'''
    # '|| true' keeps the script going when the user already exists.
    cmd = f'''
useradd {username} --create-home --shell /bin/bash || true
{add_sudo}
{add_pubkey}
'''
    p = self.sudo(cmd)
    return p
def get_image(group, image):
    """Return the id of the Azure image named *image* in resource group
    *group*, or None when no such image exists."""
    listing = run(f'az image list -g {group}')
    matching_ids = [
        entry['id'] for entry in json.loads(listing.stdout)
        if entry['name'] == image
    ]
    return matching_ids[0] if matching_ids else None
def save_image(machine, image, *, group):
    """Generalize *machine* and copy it as an image into resource group *group*.

    NOTE(review): the *image* parameter is never referenced in the body —
    the copied image apparently keeps a name derived from the source VM;
    confirm against callers.

    Raises:
        SaveImageException: when *group* equals the machine name or any
            required az step fails.
    """
    if group == machine.name:
        raise SaveImageException('group must be different than machine')
    p = _ensure_group(group, machine.location)
    if p.returncode != 0:
        raise SaveImageException(p.stderr)
    # By convention the VM lives in a resource group named after itself.
    p = run(f'az vm generalize -g {machine.name} --name {machine.name}')
    if p.returncode != 0:
        raise SaveImageException(p.stderr)
    # Best-effort: return code deliberately unchecked (extension may
    # already be installed).
    p = run('az extension add -n image-copy-extension -y')
    p = run(
        f'az image copy --source-resource-group {machine.name} --source-object-name {machine.name} --target-location {machine.location} --target-resource-group {group} --cleanup'
    )
    if p.returncode != 0:
        raise SaveImageException(p.stderr)
def create_firewall(name, *, direction='in', ports, ips=None):
    """Create a DigitalOcean firewall tagged with *name*.

    Args:
        name: firewall name; also used as the target tag.
        direction: 'in' for inbound rules, 'out' for outbound rules.
        ports: iterable of 'protocol:port' specs, or the string 'icmp'.
        ips: addresses for the rules (defaults to ['0.0.0.0/0']).

    Returns:
        A Firewall describing the created rules.

    Raises:
        FirewallRuleCreationException: on a bad direction or doctl failure.
    """
    # BUG FIX: the original used a mutable list as the default for `ips`,
    # which is shared across calls; use a None sentinel instead.
    if ips is None:
        ips = ['0.0.0.0/0']
    cmd = f'doctl compute firewall create --name {name} --tag-names {name}'
    rules = []
    for port in ports:
        if port == 'icmp':
            rule = 'protocol:icmp,address:'
            rule += ',address:'.join(ips)
        else:
            protocol, port = port.split(':')
            rule = f'protocol:{protocol},ports:{port},address:'
            rule += ',address:'.join(ips)
        rules.append(rule)
    rules = ' '.join(rules)
    if direction == 'in':
        cmd += f" --inbound-rules '{rules}'"
    elif direction == 'out':
        cmd += f" --outbound-rules '{rules}'"
    else:
        raise FirewallRuleCreationException(
            'direction must be either "in" or "out"')
    print(cmd)  # echoes the full command for operator visibility
    p = run(cmd)
    if p.returncode != 0:
        raise FirewallRuleCreationException(p.stderr)
    return Firewall(name,
                    provider=digitalocean_provider,
                    direction=direction,
                    action='allow',
                    ports=ports,
                    ips=ips)
def list(*, username=None, ssh_key_path=None):
    """Return Machine objects for all EC2 instances across every region.

    BUG FIX: the original read only ['Reservations'][0] (dropping all but
    the first reservation), never populated `instances`, returned nothing,
    and did not accept the username/ssh_key_path keywords that get()
    passes.  Now every reservation in every region is flattened,
    terminated instances are skipped, and each instance is converted with
    _instance_to_machine (matching get()'s single-region path).

    Args:
        username: ssh username forwarded to _instance_to_machine.
        ssh_key_path: ssh key path forwarded to _instance_to_machine.
    """
    machines = []
    for region in _get_regions():
        described = json.loads(
            run(['aws', 'ec2', 'describe-instances', '--region',
                 region]).stdout)
        for reservation in described['Reservations']:
            for instance in reservation['Instances']:
                if instance['State']['Name'] == 'terminated':
                    continue
                machines.append(
                    _instance_to_machine(instance, username, ssh_key_path))
    return machines
def get(name, *, username=None, ssh_key_path=None, project=None, **kwargs):
    """Fetch the gcloud instance called *name* as a Machine, or None
    when no instance matches the filter."""
    _generate_gcloud_ssh_key()
    cmd = [
        'gcloud', 'compute', 'instances', 'list', '--format',
        'value(zone, name, networkInterfaces[0].accessConfigs[0].natIP)',
        '--filter', 'name=' + name
    ]
    if project:
        cmd += ['--project', project]
    else:
        project = get_project()
    output = run(cmd).stdout.strip('\n')
    if not output:
        return None
    zone, name, ip = re.split(r'\s+', output)
    if username is None:
        username = _get_username(project)
    if ssh_key_path is None:
        ssh_key_path = SSH_KEY_PATH
    return Machine(provider=gcloud_provider,
                   name=name,
                   zone=zone,
                   ip=ip,
                   username=username,
                   ssh_key_path=ssh_key_path,
                   project=project)
def create(*, name, machine_size, disk_size_gb=None, image, location):
    """Create an Azure VM inside a freshly created, same-named resource group.

    Raises MachineCreationException when the group already exists or any
    az step fails; on VM-creation failure the new group is rolled back.
    """
    if _group_exist(name):
        raise MachineCreationException('resource group already exist')
    vm_args = [
        '--name', name,
        '--resource-group', name,
        '--image', image,
        '--size', machine_size,
    ]
    if disk_size_gb is not None:
        vm_args += ['--os-disk-size-gb', str(disk_size_gb)]
    group_proc = _create_group(name, location)
    if group_proc.returncode != 0:
        raise MachineCreationException(group_proc.stderr)
    vm_proc = run(['az', 'vm', 'create', *vm_args])
    if vm_proc.returncode != 0:
        # Roll back the resource group we just created.
        _delete_group(name)
        raise MachineCreationException(vm_proc.stderr)
    public_ip = json.loads(vm_proc.stdout)['publicIpAddress']
    return Machine(provider=azure_provider,
                   name=name,
                   zone=None,
                   ip=public_ip,
                   username=os.getlogin(),
                   ssh_key_path=SSH_KEY_PATH)
def list(*, pattern=None, project=None, username=None, ssh_key_path=None):
    """List gcloud instances as Machine objects.

    Args:
        pattern: keep only instances whose name contains this substring.
        project: gcloud project (defaults to get_project()).
        username: ssh username (defaults to _get_username(project)).
        ssh_key_path: key path (defaults to SSH_KEY_PATH).
    """
    _generate_gcloud_ssh_key()
    cmd = [
        'gcloud', 'compute', 'instances', 'list', '--format',
        'value(zone, name, networkInterfaces[0].accessConfigs[0].natIP)'
    ]
    if project:
        cmd += ['--project', project]
    else:
        project = get_project()
    p = run(cmd)
    result = []
    output = p.stdout.strip('\n')
    # BUG FIX: with zero instances the output is empty and the original
    # crashed unpacking re.split('') — return an empty list instead
    # (the single-instance get() already guards this case).
    if not output:
        return result
    for line in output.split('\n'):
        zone, name, ip = re.split(r'\s+', line)
        if pattern:
            if pattern not in name:
                continue
        if username is None:
            username = _get_username(project)
        if ssh_key_path is None:
            ssh_key_path = SSH_KEY_PATH
        result.append(
            Machine(provider=gcloud_provider,
                    name=name,
                    zone=zone,
                    ip=ip,
                    username=username,
                    ssh_key_path=ssh_key_path,
                    project=project))
    return result
def delete_image(image, *, project=None):
    """Delete the gcloud image *image*.

    Raises DeleteImageException when the gcloud command fails.
    """
    cmd = ['gcloud', 'compute', 'images', 'delete', image]
    if project:
        cmd.extend(['--project', project])
    result = run(cmd)
    if result.returncode != 0:
        raise DeleteImageException(result.stderr)
def _release_ip_address(name, region, *, project=None):
    """Release the reserved static address *name* in *region*.

    Returns the completed process from gcloud.
    """
    cmd = ['gcloud', 'compute', 'addresses', 'delete', '--region', region, name]
    if project:
        cmd.extend(['--project', project])
    # 'yes' answers gcloud's deletion confirmation prompt.
    return run(cmd, input='yes\n')
def delete_disk(disk, zone, *, project=None):
    """Delete gcloud disk *disk* in *zone*.

    Raises DiskDeletionException when the command fails.
    """
    parts = [f'gcloud compute disks delete {disk} --zone {zone}']
    if project:
        parts.append(f'--project {project}')
    # 'y' answers the deletion confirmation prompt.
    result = run(' '.join(parts), input='y\n')
    if result.returncode != 0:
        raise DiskDeletionException(result.stderr)
def _address_exist(name, region, *, project=None):
    """Return True when the static address *name* exists in *region*."""
    cmd = ['gcloud', 'compute', 'addresses', 'describe', '--region', region, name]
    if project:
        cmd.extend(['--project', project])
    return run(cmd).returncode == 0
def add_disk(machine, disk):
    """Attach gcloud disk *disk* to *machine* and mount it at /mnt/<disk>,
    formatting it as ext4 first when it carries no filesystem yet.

    Raises:
        MachineAddDiskException: when attaching, probing, or mounting fails.
    """
    p = run(
        f'gcloud compute instances attach-disk {machine.name} --disk={disk} --zone {machine.zone} --device-name={disk} --project {machine.project}'
    )
    if p.returncode != 0:
        raise MachineAddDiskException(p.stderr)
    # Probe the filesystem: lsblk prints a 'FSTYPE' header and, when a
    # filesystem exists, its type on a following line.
    p = machine.sudo('lsblk -f /dev/disk/by-id/google-' + disk +
                     " | awk '{print $2}'")
    if p.returncode != 0:
        raise MachineAddDiskException(p.stderr)
    fstype = p.stdout.strip()
    assert fstype.startswith('FSTYPE')
    if fstype.strip() != 'FSTYPE':
        # it's already been formatted and there's a filesystem on it
        p = machine.sudo(f'''
mkdir -p /mnt/{disk}
mount -o discard,defaults /dev/disk/by-id/google-{disk} /mnt/{disk}
chmod a+w /mnt/{disk}
''')
    else:
        # Fresh disk: format as ext4 before mounting.
        p = machine.sudo(f'''
sudo mkfs.ext4 -m 0 -F -E lazy_itable_init=0,lazy_journal_init=0,discard /dev/disk/by-id/google-{disk}
mkdir -p /mnt/{disk}
mount -o discard,defaults /dev/disk/by-id/google-{disk} /mnt/{disk}
sudo chmod a+w /mnt/{disk}
''')
    if p.returncode != 0:
        raise MachineAddDiskException(p.stderr)
def download(self, machine_path, local_path):
    """Recursively copy *machine_path* from the machine to *local_path*
    via scp.

    Raises DownloadException on a non-zero scp exit; returns the process.
    """
    scp_command = (
        f'scp -o StrictHostKeyChecking=no -i {self.ssh_key_path} -r '
        f'{self.username}@{self.ip}:{machine_path} {local_path}'
    )
    result = run(scp_command)
    if result.returncode != 0:
        raise DownloadException(result.stderr)
    return result
def shutdown(machine):
    """Stop the gcloud instance backing *machine*.

    Raises MachineShutdownException when the command fails.
    """
    result = run([
        'gcloud', 'compute', 'instances', 'stop', machine.name,
        '--zone', machine.zone, '--project', machine.project
    ])
    if result.returncode != 0:
        raise MachineShutdownException(result.stderr)
def get(name, *, username=None, ssh_key_path=None, region=None, **kwargs):
    """Look up one EC2 machine by instance id ('i-…') or Name tag.

    With *region* given, a single describe-instances call is made;
    otherwise every region is scanned via list().  Returns a Machine, or
    None when the name is not found or matches more than one instance.
    """
    if region:
        if name.startswith('i-'):
            cmd = f'aws ec2 describe-instances --region {region} --instance-ids {name}'
        else:
            cmd = f'aws ec2 describe-instances --region {region} --filters Name=tag:Name,Values={name}'
        p = run(cmd)
        if p.returncode != 0:
            return None
        reservations = json.loads(p.stdout)
        instances = []
        for reservation in reservations['Reservations']:
            for instance in reservation['Instances']:
                if instance['State']['Name'] != 'terminated':
                    instances.append(instance)
        # BUG FIX: the original indexed instances[0] even when the filter
        # matched nothing, raising IndexError; treat "none found" like
        # "ambiguous" and return None.
        if len(instances) != 1:
            return None
        return _instance_to_machine(instances[0], username, ssh_key_path)
    # No region given: scan everything.  list() here is the module-level
    # helper, not the builtin.
    machines = list(username=username, ssh_key_path=ssh_key_path)
    if name.startswith('i-'):
        for machine in machines:
            if machine.id == name:
                return machine
    else:
        for machine in machines:
            if machine.name == name:
                return machine
    return None
def _reserve_ip_address(ip, name, region, *, project=None):
    """Reserve static address *ip* under *name* in *region*.

    Returns the completed process from gcloud.
    """
    cmd = [
        'gcloud', 'compute', 'addresses', 'create', name,
        '--addresses', ip, '--region', region
    ]
    if project:
        cmd.extend(['--project', project])
    return run(cmd)
def create_firewall(name, allows=None):
    """Create a gcloud firewall rule named and targeting tag *name*.

    *allows* is a list of protocol[:port] specs; defaults to 'tcp:22'.
    Returns the completed process from gcloud.
    """
    allow_value = ','.join(allows) if allows else 'tcp:22'
    return run([
        'gcloud', 'compute', 'firewall-rules', 'create', name,
        '--target-tags', name, '--allow', allow_value
    ])
def get_image(name):
    """Return the id of the DigitalOcean image called *name*, or None.

    Parses doctl's whitespace-separated tabular output (header included;
    the header's name column never matches a real image name).
    """
    p = run('doctl compute image list')
    for line in p.stdout.split('\n'):
        fields = re.split(r'\s+', line)
        # BUG FIX: blank lines (e.g. from the trailing newline) yield
        # fewer than two fields and crashed the original's unpacking
        # with ValueError; skip them.
        if len(fields) < 2:
            continue
        image_id, image_name = fields[0], fields[1]
        if image_name == name:
            return image_id
    return None
def change_type(machine, new_type):
    """Set the gcloud machine type of *machine* to *new_type*.

    Raises MachineChangeTypeException when the command fails.
    """
    result = run([
        'gcloud', 'compute', 'instances', 'set-machine-type', machine.name,
        '--zone', machine.zone, '--machine-type', new_type,
        '--project', machine.project
    ])
    if result.returncode != 0:
        raise MachineChangeTypeException(result.stderr)
def bootup(machine):
    """Start the gcloud instance backing *machine* and block until ssh
    is reachable.

    Raises MachineBootupException when the start command fails.
    """
    result = run([
        'gcloud', 'compute', 'instances', 'start', machine.name,
        '--zone', machine.zone, '--project', machine.project
    ])
    if result.returncode != 0:
        raise MachineBootupException(result.stderr)
    machine.wait_ssh()
def remove_disk(machine, disk):
    """Unmount and detach gcloud disk *disk* from *machine*.

    A missing device during umount is tolerated (the disk may already be
    unmounted).  Raises MachineRemoveDiskException on other failures.
    """
    unmount = machine.sudo(f'umount /dev/disk/by-id/google-{disk}')
    if unmount.returncode != 0 and 'No such file or directory' not in unmount.stderr:
        raise MachineRemoveDiskException(unmount.stderr)
    detach = run(
        f'gcloud compute instances detach-disk {machine.name} --disk {disk} --zone {machine.zone} --project {machine.project}'
    )
    if detach.returncode != 0:
        raise MachineRemoveDiskException(detach.stderr)
def get(name):
    """Return the gcloud instance named *name* as a Machine, or None
    when nothing matches the filter."""
    proc = run([
        'gcloud', 'compute', 'instances', 'list', '--format',
        'value(zone, name, networkInterfaces[0].accessConfigs[0].natIP)',
        '--filter', 'name=' + name
    ])
    output = proc.stdout.strip('\n')
    if not output:
        return None
    zone, name, ip = re.split(r'\s+', output)
    # NOTE(review): _get_username is called with no arguments here, while a
    # project-taking variant exists elsewhere — confirm which helper is in
    # scope for this module.
    return Machine(provider=gcloud_provider,
                   name=name,
                   zone=zone,
                   ip=ip,
                   username=_get_username(),
                   ssh_key_path=SSH_KEY_PATH)
def _get_ip(name, *, project=None):
    """Return the external IP of instance *name* (empty string when the
    instance has none or does not exist)."""
    cmd = [
        'gcloud', 'compute', 'instances', 'list',
        '--filter', 'name=' + name,
        '--format', 'get(networkInterfaces[0].accessConfigs[0].natIP)'
    ]
    if project:
        cmd.extend(['--project', project])
    return run(cmd).stdout.strip()
def change_type(machine, new_type):
    """Resize a DigitalOcean droplet to *new_type* and wait for completion.

    *new_type* is a size slug (see `doctl compute size list`).
    Raises MachineChangeTypeException when the resize fails.
    """
    resize = run(
        f'doctl compute droplet-action resize {machine.id} --size {new_type} --wait'
    )
    if resize.returncode != 0:
        raise MachineChangeTypeException(resize.stderr)