def _create_server(self):
    """Boot a new GCE instance from the newest matching base image."""
    log_green("Started...")
    log_yellow("...Creating GCE instance...")
    # Resolve the most recent image for the configured project/prefix.
    image = self._get_latest_image(self.config.base_image_project,
                                   self.config.base_image_prefix)
    self.startup_instance(self.state.instance_name,
                          image["selfLink"],
                          disk_name=None)
    self._set_instance_networking()
def create_ami(connection, region, instance_id, name, description,
               block_device_mapping=None, log=False):
    """Create an EC2 AMI from an instance and poll until done.

    Returns the ami id when the image becomes available, or False
    when it ends up in the 'failed' state.
    """
    ami = connection.create_image(instance_id, name, description,
                                  block_device_mapping)
    status = connection.get_image(ami)
    # Poll once a minute until EC2 reports a terminal state.
    while status.state not in ("available", "failed"):
        if log:
            log_yellow('creating ami...')
        sleep_for_one_minute()
        status = connection.get_image(ami)
    if status.state == "available":
        if log:
            log_green("ami %s %s" % (ami, status))
        return ami
    if log:
        log_red("ami %s %s" % (ami, status))
    return False
def create_instance_from_saved_state():
    """Rehydrate a cloud instance from the local saved-state file.

    Exits with an error when the cloud or distribution requested on
    the command line disagrees with what the state file recorded.
    """
    saved_state = load_state()
    cloud = saved_state['cloud']
    requested_cloud = env.config.get('cloud')
    if requested_cloud and requested_cloud != cloud:
        log_red("The specified cloud: {} does not match the cloud "
                "specified in the saved state file: {}".format(
                    requested_cloud, cloud))
        sys.exit(1)
    region = saved_state['region']
    config = _get_platform_config(cloud, region,
                                  Distribution(saved_state['distro']))
    log_green('Reusing instance from saved state...')
    factory = _get_cloud_instance_factory(cloud)
    instance = factory.create_from_saved_state(config, saved_state['state'])
    log_green('...Done')
    _setup_fab_for_instance(instance)
    _save_state_from_instance(instance)
    requested_distro = env.config.get('distribution')
    if (requested_distro and
            requested_distro != instance.distro.value):
        log_red("The specified distribution: {} does not match the distro "
                "specified in the saved state file: {}".format(
                    requested_distro, instance.distro.value))
        sys.exit(1)
    return instance
def _test_obor(mesos_masters=mesos_masters, mesos_slaves=mesos_slaves):
    """Run the OBOR acceptance tests against every mesos master/slave."""
    log_green('running _test_obor')
    # local() doesn't support most context managers, so bake the ssh
    # environment into a small script consumed through prefix().
    with open('shell_env', 'w') as shell_env:
        shell_env.write("eval `ssh-agent`\n")
        shell_env.write("ssh-add $PWD/nixos-vagrant-configs/*.priv\n")
    local('chmod +x shell_env')
    local('chmod 600 nixos-vagrant-configs/vagrant.priv')

    def _run(target, task):
        # Redirect each run into a timestamped per-host log file.
        local("fab -i nixos-vagrant-configs/vagrant.priv " +
              "-H {} {} ".format(target, task) +
              "> log/`date '+%Y%m%d%H%M%S'`."
              "{}.test_obor.log 2>&1".format(target))

    with settings(shell='/run/current-system/sw/bin/bash -l -c'):
        with prefix(". ./shell_env"):  # pylint: disable=not-context-manager
            for target, _ in mesos_masters:
                _run(target, 'acceptance_tests_mesos_master')
            for target, _ in mesos_slaves:
                _run(target, 'acceptance_tests_mesos_slave')
    log_green('_test_obor completed')
def apt_add_key(keyid, keyserver='keyserver.ubuntu.com', log=False):
    """Import and trust a PGP key for an apt repository.

    Always returns True once the key has been received.
    """
    if log:
        log_green('trusting keyid %s from %s' % (keyid, keyserver))
    recv_cmd = 'apt-key adv --keyserver %s --recv %s' % (keyserver, keyid)
    with settings(hide('warnings', 'running', 'stdout')):
        sudo(recv_cmd)
    return True
def list_images(self):
    """Print creation time, name and id of every image we own."""
    log_yellow("creation time\timage_name\timage_id")
    owned = self.connection.get_all_images(owners='self')
    for img in sorted(owned, key=lambda img: img.creationDate):
        log_green("{}\t{:50}\t{}".format(img.creationDate, img.name, img.id))
def list_images(self):
    """Print creation time, human id and id of every nova image."""
    log_yellow("creation time\timage_name\timage_id")
    for img in sorted(self._nova.images.list(), key=lambda i: i.created):
        log_green("{}\t{:50}\t{}".format(img.created, img.human_id, img.id))
def _provision_obor(nodes=nodes):
    """Provision every OBOR node in parallel, retrying up to 3 times.

    Each attempt fans out one local fab run per node, waits for all of
    them, and only retries when at least one run exited non-zero.
    """
    log_green('running _provision_obor')
    local('chmod 600 nixos-vagrant-configs/vagrant.priv')
    count = 1
    # BUG FIX: the original condition was `while True or count > 3`,
    # which is always True and retried forever on persistent failure.
    while count <= 3:
        jobs = []
        for node, hostdir in nodes:
            jobs.append(
                mp(target=local,
                   args=("fab -i nixos-vagrant-configs/vagrant.priv " +
                         "-H %s update:" % node +
                         "host_dir=%s," % hostdir +
                         "rsync='yes'," +
                         "nix_gc='no'," +
                         "nix_release='18.09'," +
                         "switch='no'" +
                         "> log/`date '+%Y%m%d%H%M%S'`." +
                         "%s.provision.log 2>&1" % node, )))
        for job in jobs:
            job.start()
        exit_code = 0
        for job in jobs:
            job.join()
            exit_code = exit_code + job.exitcode
        if exit_code == 0:
            break
        count = count + 1
    log_green('_provision_obor completed')
def down(self):
    """Stop the EC2 instance and block until it reaches 'stopped'."""
    stopped = self.connection.stop_instances(
        instance_ids=self.state.instance_id)
    instance = stopped[0]
    # Poll every 10 seconds until the shutdown completes.
    while True:
        if instance.state == "stopped":
            break
        log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    log_green('Instance state: %s' % instance.state)
def down(self):
    """Power off the EC2 instance; return once it is fully stopped."""
    halted = self.connection.stop_instances(
        instance_ids=self.state.instance_id)
    inst = halted[0]
    # Keep polling until AWS reports the terminal 'stopped' state.
    while inst.state != "stopped":
        log_yellow("Instance state: %s" % inst.state)
        sleep(10)
        inst.update()
    log_green('Instance state: %s' % inst.state)
def update(host_dir=None,
           rsync='yes',
           nix_gc='yes',
           nix_release='18.09',
           switch='no'):  # pylint: disable=too-many-arguments
    """ deploy or update OBOR on a host """
    # Syncs /etc/nixos from a local per-host directory, optionally runs
    # garbage collection, refreshes the nix channel, rebuilds, and
    # (optionally) switches into the new generation.
    log_green('running update on {}'.format(env.host_string))
    # Default the config directory to the target host's name.
    if not host_dir:
        host_dir = env.host_string
    local('rm -f {}/result'.format(host_dir))
    # Flag values arrive as strings from the fab command line.
    yes_answers = ['yes', 'y', 'YES', 'Y', 'True', 'true']
    if rsync in yes_answers:
        with settings(warn_only=True,
                      shell='/run/current-system/sw/bin/bash -l -c'):
            # Push the host-specific nixos configuration tree.
            rsync_project(remote_dir='/etc/nixos/',
                          local_dir=host_dir + '/',
                          delete=True,
                          extra_opts='--rsync-path="sudo rsync"',
                          default_opts='-chavzPq',
                          ssh_opts=' -o UserKnownHostsFile=/dev/null ' +
                          '-o StrictHostKeyChecking=no ')
            # Push the shared 'common' tree used by every host.
            rsync_project(remote_dir='/etc/nixos/common',
                          local_dir='common/',
                          delete=True,
                          extra_opts='--rsync-path="sudo rsync"',
                          default_opts='-chavzPq',
                          ssh_opts=' -o UserKnownHostsFile=/dev/null ' +
                          '-o StrictHostKeyChecking=no ')
    with settings(warn_only=True,
                  shell='/run/current-system/sw/bin/bash -l -c'):
        sudo('rm -f /etc/nixos/result')
        if nix_gc in yes_answers:
            sudo('nix-collect-garbage -d >/dev/null')
        # Pin the channel to the requested release before updating.
        sudo('nix-channel --add '
             'https://nixos.org/channels/nixos-{} nixos'.format(nix_release))
        sudo('nix-channel --update')

    def _nixos_rebuild():
        """ wrapper for nixos-rebuild """
        # Build and register the new generation for the next boot.
        with settings(shell='/run/current-system/sw/bin/bash -l -c'):
            sudo('nixos-rebuild build -Q')
            sudo('nixos-rebuild boot -Q')

    def _nixos_switch():
        """ wrapper for nixos-rebuild """
        # Activate the new generation immediately.
        with settings(shell='/run/current-system/sw/bin/bash -l -c'):
            sudo('nixos-rebuild switch -Q')

    _nixos_rebuild()
    if switch in yes_answers:
        _nixos_switch()
def apt_add_key(keyid, keyserver='keyserver.ubuntu.com', log=False):
    """Trust a new PGP key for an apt repository; returns True."""
    if log:
        log_green('trusting keyid %s from %s' % (keyid, keyserver))
    quiet = hide('warnings', 'running', 'stdout')
    with settings(quiet):
        sudo('apt-key adv --keyserver %s --recv %s' % (keyserver, keyid))
    return True
def _set_instance_networking(self, server):
    """Record the server's IPv4 address and wait for ssh to answer."""
    # NOTE(review): assumes accessIPv4 is None (not '') when unassigned.
    if server.accessIPv4 is None:
        log_red('No IP address assigned')
        exit(1)
    ip_address = server.accessIPv4
    self.state = self.state.transform(['ip_address'], ip_address)
    wait_for_ssh(ip_address)
    log_green(
        'Connected to server with IP address {0}.'.format(ip_address))
def _set_instance_networking(self, server):
    """Store the instance IPv4 in state and block until ssh is up."""
    addr = server.accessIPv4
    if addr is None:
        log_red('No IP address assigned')
        exit(1)
    self.state = self.state.transform(['ip_address'], addr)
    wait_for_ssh(addr)
    log_green('Connected to server with IP address {0}.'.format(addr))
def _create_server(self):
    """Create and network a fresh GCE server from the latest base image."""
    log_green("Started...")
    log_yellow("...Creating GCE instance...")
    base = self._get_latest_image(self.config.base_image_project,
                                  self.config.base_image_prefix)
    self.startup_instance(self.state.instance_name,
                          base['selfLink'],
                          disk_name=None)
    self._set_instance_networking()
def down_ec2(connection, instance_id, region, log=False):
    """Stop a running EC2 instance and wait for the 'stopped' state."""
    halted = connection.stop_instances(instance_ids=instance_id)
    instance = halted[0]
    # Poll every 10 seconds until shutdown completes.
    while instance.state != "stopped":
        if log:
            log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    if log:
        log_green('Instance state: %s' % instance.state)
def yum_group_install(**kwargs):
    """Install one or more yum package groups.

    kwargs: 'groups' (iterable of group names), optional 'repo'.
    """
    for group in list(kwargs['groups']):
        log_green("installing %s ..." % group)
        if 'repo' in kwargs:
            sudo("yum groupinstall -y --quiet "
                 "--enablerepo=%s '%s'" % (kwargs['repo'], group))
        else:
            # Mark then convert so yum tracks the group's install state.
            sudo("yum groups mark install -y --quiet '%s'" % group)
            sudo("yum groups mark convert -y --quiet '%s'" % group)
            sudo("yum groupinstall -y --quiet '%s'" % group)
def create_new_intance_from_config(cloud, distro, region):
    """Create a brand-new cloud instance from configuration.

    NOTE(review): the 'intance' typo in the name is kept deliberately --
    renaming would break existing callers.
    """
    factory = _get_cloud_instance_factory(cloud)
    log_green('Creating an instance from configuration...')
    platform_config = _get_platform_config(cloud, region, distro)
    instance = factory.create_from_config(platform_config, distro, region)
    log_green('...Done')
    _setup_fab_for_instance(instance)
    _save_state_from_instance(instance)
    return instance
def yum_group_install(**kwargs):
    """Install the yum groups named in kwargs['groups'].

    When kwargs carries 'repo', install with that repo enabled.
    """
    groups = list(kwargs['groups'])
    for grp in groups:
        log_green("installing %s ..." % grp)
        if 'repo' not in kwargs:
            sudo("yum groups mark install -y --quiet '%s'" % grp)
            sudo("yum groups mark convert -y --quiet '%s'" % grp)
            sudo("yum groupinstall -y --quiet '%s'" % grp)
        else:
            repo = kwargs['repo']
            sudo("yum groupinstall -y --quiet "
                 "--enablerepo=%s '%s'" % (repo, grp))
def destroy(self):
    """Delete the rackspace server and wait until it is gone."""
    doomed = self._nova.servers.find(name=self.state.instance_name)
    log_yellow('deleting rackspace instance ...')
    doomed.delete()
    # servers.get raises NotFound once the deletion has finished.
    try:
        while True:
            doomed = self._nova.servers.get(doomed.id)
            log_yellow('waiting for deletion ...')
            sleep(5)
    except NotFound:
        pass
    log_green('The server has been deleted')
def destroy(self):
    """Tear down the rackspace instance, blocking until removal."""
    target = self._nova.servers.find(name=self.state.instance_name)
    log_yellow('deleting rackspace instance ...')
    target.delete()
    try:
        # Poll until the lookup fails with NotFound (deletion done).
        while True:
            target = self._nova.servers.get(target.id)
            log_yellow('waiting for deletion ...')
            sleep(5)
    except NotFound:
        pass
    log_green('The server has been deleted')
def create_image():
    """ create ami/image for either AWS, Rackspace or GCE """
    timestamp = datetime.utcnow().strftime("%Y%m%d%H%M")
    instance = create_instance_from_saved_state()
    image_name = "{}-{}".format(instance.image_basename, timestamp)
    image_id = instance.create_image(image_name)
    log_green('Created server image {}: {}'.format(image_name, image_id))
    # GCE shuts the instance down before creating an image. In the case where
    # the instance comes back up with a different IP address, we need to
    # re-sync fab and the save state.
    _setup_fab_for_instance(instance)
    _save_state_from_instance(instance)
def _set_instance_networking(self):
    """
    Pulls out the IP address for the instance and double checks that
    we can connect to it's ssh port.
    """
    request = self._compute.instances().get(
        project=self.project,
        zone=self.zone,
        instance=self.state.instance_name)
    instance_data = request.execute()
    nat_ip = instance_data["networkInterfaces"][0]["accessConfigs"][0]["natIP"]
    self.state = self.state.transform(["ip_address"], nat_ip)
    wait_for_ssh(self.state.ip_address)
    log_green("Connected to server with IP address {0}.".format(nat_ip))
def yum_install(**kwargs):
    """Install yum packages, optionally from a specific repository.

    kwargs: 'packages' (iterable of package names), optional 'repo'.
    Packages already installed are skipped.
    """
    # FIX: check kwargs directly instead of the fragile `'repo' in
    # locals()` probe the original used; both are true exactly when a
    # repo was supplied, but this form survives refactoring.
    repo = kwargs.get('repo')
    for pkg in list(kwargs['packages']):
        if is_package_installed(distribution='el', pkg=pkg) is False:
            if 'repo' in kwargs:
                log_green("installing %s from repo %s ..." % (pkg, repo))
                sudo("yum install -y --quiet --enablerepo=%s %s" % (repo,
                                                                    pkg))
            else:
                log_green("installing %s ..." % pkg)
                sudo("yum install -y --quiet %s" % pkg)
def startup_instance(self, instance_name, image, disk_name=None):
    """
    Boot a static GCE Jenkins slave. jclouds is currently broken for
    GCE, so slaves are booted directly through the compute API.
    """
    log_green("Started...")
    log_yellow("...Starting GCE Jenkins Slave Instance...")
    body = self._get_instance_config(instance_name, image, disk_name)
    operation = self._compute.instances().insert(
        project=self.project, zone=self.zone, body=body).execute()
    if not self._wait_until_done(operation):
        raise RuntimeError("Creation of VM timed out or returned no result")
    log_green("Instance has booted")
def yum_install(**kwargs):
    """Install yum packages that are not yet present.

    kwargs: 'packages' (iterable of package names), optional 'repo'
    to install with that repository enabled.
    """
    has_repo = 'repo' in kwargs
    # FIX: the original tested `'repo' in locals()`, a brittle probe
    # that only worked because `repo` was conditionally assigned above
    # the loop; test the kwargs directly instead.
    for pkg in list(kwargs['packages']):
        if is_package_installed(distribution='el', pkg=pkg) is False:
            if has_repo:
                repo = kwargs['repo']
                log_green(
                    "installing %s from repo %s ..." % (pkg, repo))
                sudo("yum install -y --quiet --enablerepo=%s %s" % (repo,
                                                                    pkg))
            else:
                log_green("installing %s ..." % pkg)
                sudo("yum install -y --quiet %s" % pkg)
def destroy_rackspace(connection, region, instance_id):
    """ terminates the instance """
    server = connection.servers.get(instance_id)
    log_yellow('deleting rackspace instance ...')
    server.delete()
    # wait for server to be deleted: servers.get raises once the
    # instance is really gone.
    try:
        while True:
            server = connection.servers.get(server.id)
            log_yellow('waiting for deletion ...')
            sleep(5)
    # FIX: was a bare `except:` which also swallowed SystemExit and
    # KeyboardInterrupt; Exception keeps the same "done" detection
    # without trapping interpreter-exit signals.
    except Exception:
        pass
    log_green('The server has been deleted')
def _set_instance_networking(self):
    """
    Pulls out the IP address for the instance and double checks that
    we can connect to it's ssh port.
    """
    data = (self._compute.instances()
            .get(project=self.project,
                 zone=self.zone,
                 instance=self.state.instance_name)
            .execute())
    ip_address = data['networkInterfaces'][0]['accessConfigs'][0]['natIP']
    self.state = self.state.transform(['ip_address'], ip_address)
    wait_for_ssh(self.state.ip_address)
    log_green(
        'Connected to server with IP address {0}.'.format(ip_address))
def destroy_rackspace(connection, region, instance_id):
    """Terminate a rackspace instance and wait for it to disappear."""
    server = connection.servers.get(instance_id)
    log_yellow('deleting rackspace instance ...')
    server.delete()
    try:
        # Poll until the lookup raises, meaning deletion has finished.
        while True:
            server = connection.servers.get(server.id)
            log_yellow('waiting for deletion ...')
            sleep(5)
    except Exception:
        # FIX: narrowed from a bare `except:` so SystemExit and
        # KeyboardInterrupt are no longer swallowed here.
        pass
    log_green('The server has been deleted')
def apt_install_from_url(pkg_name, url, log=False):
    """Download a package from *url* and install it with dpkg.

    pkg_name: the name of the package to install
    url: the full URL for the package file
    """
    already_there = is_package_installed(distribution='ubuntu',
                                         pkg=pkg_name)
    if already_there is False:
        if log:
            log_green("installing %s from %s" % (pkg_name, url))
        with settings(hide('warnings', 'running', 'stdout'),
                      capture=True):
            sudo("wget -c -O %s.deb %s" % (pkg_name, url))
            sudo("dpkg -i %s.deb" % pkg_name)
    # if we didn't abort above, we should return True
    return True
def create_server_rackspace(connection,
                            distribution,
                            disk_name,
                            disk_size,
                            ami,
                            region,
                            key_pair,
                            instance_type,
                            instance_name,
                            tags=None,
                            security_groups=None):
    """ Creates Rackspace Instance and saves it state in a local json file """
    # FIX: `tags` defaulted to a mutable {}; None avoids the shared
    # mutable-default pitfall (the argument is currently unused here).
    log_yellow("Creating Rackspace instance...")
    flavor = connection.flavors.find(name=instance_type)
    image = connection.images.find(name=ami)
    server = connection.servers.create(name=instance_name,
                                       flavor=flavor.id,
                                       image=image.id,
                                       region=region,
                                       availability_zone=region,
                                       key_name=key_pair)
    while server.status == 'BUILD':
        log_yellow("Waiting for build to finish...")
        sleep(5)
        server = connection.servers.get(server.id)
    # check for errors
    if server.status != 'ACTIVE':
        log_red("Error creating rackspace instance")
        exit(1)
    # the server was assigned IPv4 and IPv6 addresses, locate the IPv4 address
    ip_address = server.accessIPv4
    if ip_address is None:
        log_red('No IP address assigned')
        exit(1)
    wait_for_ssh(ip_address)
    log_green('New server with IP address {0}.'.format(ip_address))
    return server
def create_server_rackspace(connection,
                            distribution,
                            disk_name,
                            disk_size,
                            ami,
                            region,
                            key_pair,
                            instance_type,
                            instance_name,
                            tags=None,
                            security_groups=None):
    """Create a Rackspace instance and wait until it is reachable.

    FIX: `tags` previously defaulted to a mutable {} (shared across
    calls); None preserves behaviour since the argument is unused.
    """
    log_yellow("Creating Rackspace instance...")
    flavor = connection.flavors.find(name=instance_type)
    image = connection.images.find(name=ami)
    server = connection.servers.create(name=instance_name,
                                       flavor=flavor.id,
                                       image=image.id,
                                       region=region,
                                       availability_zone=region,
                                       key_name=key_pair)
    # Poll until the build leaves the BUILD state.
    while server.status == 'BUILD':
        log_yellow("Waiting for build to finish...")
        sleep(5)
        server = connection.servers.get(server.id)
    if server.status != 'ACTIVE':
        log_red("Error creating rackspace instance")
        exit(1)
    # The server gets IPv4 and IPv6 addresses; we use the IPv4 one.
    ip_address = server.accessIPv4
    if ip_address is None:
        log_red('No IP address assigned')
        exit(1)
    wait_for_ssh(ip_address)
    log_green('New server with IP address {0}.'.format(ip_address))
    return server
def startup_instance(self, instance_name, image, disk_name=None):
    """
    Boot a static GCE Jenkins slave instance; jclouds GCE support is
    broken for now, so the compute API is driven directly.
    """
    log_green("Started...")
    log_yellow("...Starting GCE Jenkins Slave Instance...")
    config = self._get_instance_config(instance_name, image, disk_name)
    insert_op = self._compute.instances().insert(project=self.project,
                                                 zone=self.zone,
                                                 body=config).execute()
    result = self._wait_until_done(insert_op)
    if not result:
        raise RuntimeError(
            "Creation of VM timed out or returned no result")
    log_green("Instance has booted")
def apt_install_from_url(pkg_name, url, log=False):
    """Install a .deb fetched from *url* unless already installed.

    pkg_name: the name of the package to install
    url: the full URL for the package file
    """
    if is_package_installed(distribution='ubuntu', pkg=pkg_name) is False:
        if log:
            log_green("installing %s from %s" % (pkg_name, url))
        quiet = hide('warnings', 'running', 'stdout')
        with settings(quiet, capture=True):
            sudo("wget -c -O %s.deb %s" % (pkg_name, url))
            sudo("dpkg -i %s.deb" % pkg_name)
    # if we didn't abort above, we should return True
    return True
def _destroy_ebs_volume(self, volume_id):
    """ destroys an ebs volume """
    if self._ebs_volume_exists(volume_id):
        log_yellow('destroying EBS volume ...')
        try:
            self.connection.delete_volume(volume_id)
        except Exception as e:
            # our EBS volume may be gone, but AWS info tables are stale
            # wait a bit and ask again
            log_yellow("exception raised when deleting volume")
            log_yellow("{} -- {}".format(type(e), str(e)))
            worked = False
            for _ in range(6):
                sleep(5)
                if not self._ebs_volume_exists(volume_id):
                    log_green("It worked that time")
                    worked = True
                    # FIX: stop polling once the volume is gone; the
                    # original kept sleeping (up to 30s more) and could
                    # log success repeatedly.
                    break
            if not worked:
                raise Exception("Couldn't delete EBS volume")
def yum_install_from_url(pkg_name, url):
    """Install an rpm straight from *url* unless already installed.

    pkg_name: the name of the package to install
    url: the full URL for the rpm package
    Returns True on success, False when rpm exits 1; any other exit
    code prints the output and aborts.
    """
    if is_package_installed(distribution='el', pkg=pkg_name) is not False:
        return
    log_green("installing %s from %s" % (pkg_name, url))
    with settings(hide('warnings', 'running', 'stdout', 'stderr'),
                  warn_only=True, capture=True):
        result = sudo("rpm -i %s" % url)
        if result.return_code == 0:
            return True
        if result.return_code == 1:
            return False
        # print error to user
        print(result)
        raise SystemExit()
def yum_install_from_url(pkg_name, url):
    """Install an rpm package directly from a URL.

    pkg_name: the name of the package to install
    url: the full URL for the rpm package
    """
    already = is_package_installed(distribution='el', pkg=pkg_name)
    if already is False:
        log_green("installing %s from %s" % (pkg_name, url))
        quiet = hide('warnings', 'running', 'stdout', 'stderr')
        with settings(quiet, warn_only=True, capture=True):
            result = sudo("rpm -i %s" % url)
            if result.return_code == 0:
                return True
            elif result.return_code == 1:
                # rpm exits 1 when the package is already installed
                return False
            else:
                # print error to user
                print(result)
                raise SystemExit()
def _destroy_ebs_volume(self, volume_id):
    """Destroy an EBS volume, retrying the existence check when AWS
    metadata is stale after the delete call."""
    if self._ebs_volume_exists(volume_id):
        log_yellow('destroying EBS volume ...')
        try:
            self.connection.delete_volume(volume_id)
        except Exception as e:
            # our EBS volume may be gone, but AWS info tables are stale
            # wait a bit and ask again
            log_yellow("exception raised when deleting volume")
            log_yellow("{} -- {}".format(type(e), str(e)))
            worked = False
            for _ in range(6):
                sleep(5)
                if not self._ebs_volume_exists(volume_id):
                    log_green("It worked that time")
                    worked = True
                    break  # FIX: exit the retry loop on success instead
                    # of sleeping through the remaining iterations
            if not worked:
                raise Exception("Couldn't delete EBS volume")
def create_image(self, image_name):
    """Create an AMI from this instance and poll to a terminal state.

    Returns the ami id when available, False on failure.
    """
    ami = self.connection.create_image(
        self.state.instance_id,
        image_name,
        description=self.config.image_description,
    )
    status = self.connection.get_image(ami)
    # One-minute polling cadence until EC2 reports a final state.
    while status.state not in ("available", "failed"):
        log_yellow('creating ami...')
        sleep(60)
        status = self.connection.get_image(ami)
    if status.state == "available":
        log_green("ami %s %s" % (ami, status))
        return ami
    log_red("ami %s %s" % (ami, status))
    return False
def create_image(self, image_name):
    """Snapshot the instance into an AMI; return its id or False."""
    ami = self.connection.create_image(
        self.state.instance_id,
        image_name,
        description=self.config.image_description,
    )
    image_status = self.connection.get_image(ami)
    terminal = ("available", "failed")
    while image_status.state not in terminal:
        log_yellow('creating ami...')
        sleep(60)
        image_status = self.connection.get_image(ami)
    if image_status.state != "available":
        log_red("ami %s %s" % (ami, image_status))
        return False
    log_green("ami %s %s" % (ami, image_status))
    return ami
def clean():
    """ destroy all VMs """
    log_green('running clean')
    vagrant_cmd = ("vagrant destroy -f "
                   "> log/`date '+%Y%m%d%H%M%S'`.vagrant.destroy.log 2>&1")
    jobs = [
        mp(target=destroy_railtrack),
        mp(target=local, args=(vagrant_cmd, )),
    ]
    for job in jobs:
        job.start()
    # Accumulate exit codes; any non-zero child fails the whole clean.
    exit_code = 0
    for job in jobs:
        job.join()
        exit_code = exit_code + job.exitcode
    if exit_code != 0:
        raise Exception('clean failed')
    log_green('running clean completed')
def upload_key(self):
    """Ensure our public key is registered as a rackspace key pair."""
    key_pair = self.config.key_pair
    try:
        log_green("Checking for key pair {}".format(key_pair))
        self._nova.keypairs.get(key_pair)
        log_green("Key pair exists in rackspace")
    except NotFound:
        # Not there yet: upload the local public key under that name.
        log_green("Creating key pair {}".format(key_pair))
        with open(self.config.public_key_filename) as keyfile:
            self._nova.keypairs.create(key_pair, keyfile.read())
def upload_key(self):
    """Register the configured public key with rackspace if missing."""
    name = self.config.key_pair
    try:
        log_green("Checking for key pair {}".format(name))
        self._nova.keypairs.get(name)
    except NotFound:
        log_green("Creating key pair {}".format(name))
        with open(self.config.public_key_filename) as keyfile:
            pubkey = keyfile.read()
        self._nova.keypairs.create(name, pubkey)
    else:
        log_green("Key pair exists in rackspace")
def create_rackspace_image(connection, server_id, name, description,
                           block_device_mapping=None):
    """Create a rackspace image from a server and wait for completion.

    Exits the process when the image build ends in the 'error' state;
    returns the image id on success.
    """
    image_id = connection.servers.create_image(server_id, name)
    log_green('creating rackspace image...')
    status = connection.images.get(image_id).status.lower()
    while status not in ['active', 'error']:
        log_green('building rackspace image...')
        sleep_for_one_minute()
        status = connection.images.get(image_id).status.lower()
    # FIX: the original compared the status snapshot taken *before*
    # the polling loop, so a build that later failed was never caught.
    if status == 'error':
        log_red('error creating image')
        sys.exit(1)
    log_green('finished image: %s' % image_id)
    return image_id
def create_image(self, image_name):
    """Snapshot the instance into a rackspace image; return its id.

    Exits when the image build ends in the 'error' state.
    """
    server = self._nova.servers.find(name=self.state.instance_name)
    image_id = self._nova.servers.create_image(server.id,
                                               image_name=image_name)
    log_green('creating rackspace image...')
    sleep_time = 20
    elapsed = 0
    status = self._nova.images.get(image_id).status.lower()
    while status not in ['active', 'error']:
        log_green('building rackspace image, '
                  'this could take a bit: elapsed {}s.'.format(elapsed))
        sleep(sleep_time)  # FIX: was a hard-coded sleep(20)
        elapsed += sleep_time
        status = self._nova.images.get(image_id).status.lower()
    # FIX: re-check the *final* status; the original compared the
    # status captured before the loop, so 'error' was never seen here.
    if status == 'error':
        log_red('error creating image')
        exit(1)
    log_green('finished image: %s' % image_id)
    return image_id
def create_image(self, image_name):
    """Create a rackspace image of this instance and wait for it.

    Returns the image id; exits on an 'error' build state.
    """
    server = self._nova.servers.find(name=self.state.instance_name)
    image_id = self._nova.servers.create_image(server.id,
                                               image_name=image_name)
    log_green('creating rackspace image...')
    sleep_time = 20
    elapsed = 0
    status = self._nova.images.get(image_id).status.lower()
    while status not in ['active', 'error']:
        log_green('building rackspace image, '
                  'this could take a bit: elapsed {}s.'.format(elapsed))
        sleep(sleep_time)  # FIX: use sleep_time, not a literal 20
        elapsed += sleep_time
        status = self._nova.images.get(image_id).status.lower()
    # FIX: test the final status -- the original tested the snapshot
    # taken before the polling loop, so failures went undetected.
    if status == 'error':
        log_red('error creating image')
        exit(1)
    log_green('finished image: %s' % image_id)
    return image_id
def create_rackspace_image(connection, server_id, name, description,
                           block_device_mapping=None):
    """Build a rackspace image from *server_id*; return the image id.

    Exits the process when the build finishes in the 'error' state.
    """
    image_id = connection.servers.create_image(server_id, name)
    log_green('creating rackspace image...')
    status = connection.images.get(image_id).status.lower()
    while status not in ['active', 'error']:
        log_green('building rackspace image...')
        sleep_for_one_minute()
        status = connection.images.get(image_id).status.lower()
    # FIX: check the status fetched *after* polling; the original
    # checked a pre-loop snapshot, so 'error' was never detected.
    if status == 'error':
        log_red('error creating image')
        sys.exit(1)
    log_green('finished image: %s' % image_id)
    return image_id
def cache_docker_image_locally(docker_image, log=False):
    """Pre-pull a docker image so later runs hit the local cache."""
    if log:
        log_green('pulling docker image %s locally' % docker_image)
    pull_cmd = "docker pull %s" % docker_image
    sudo(pull_cmd)
def list_images(self):
    """Print creation time, human id and id for every nova image."""
    log_yellow("creation time\timage_name\timage_id")
    by_creation = sorted(self._nova.images.list(),
                         key=lambda img: img.created)
    for img in by_creation:
        log_green("{}\t{:50}\t{}".format(img.created,
                                         img.human_id,
                                         img.id))
def list_images(self):
    """Print creation timestamp and name of every project image."""
    listing = self._compute.images().list(project=self.project).execute()
    log_yellow("creation time\timage_name")
    for entry in listing["items"]:
        log_green("{}\t{}".format(entry["creationTimestamp"],
                                  entry["name"]))
def delete_image(self, image_name):
    """Delete a GCE image and report the operation's final status."""
    log_green("Deleting image {}".format(image_name))
    delete_op = self._compute.images().delete(
        project=self.project, image=image_name).execute()
    result = self._wait_until_done(delete_op)
    log_yellow("Delete image returned status {}".format(result["status"]))
def _setup_fab_for_instance(instance):
    """Point fabric's env at the instance's ssh user and key file."""
    log_green('Setting fab environment to work with instance.')
    env.key_filename = instance.key_filename
    env.user = instance.username
def list_images(self):
    """List all images in the GCE project with their creation times."""
    log_yellow("creation time\timage_name")
    response = self._compute.images().list(project=self.project).execute()
    for item in response['items']:
        log_green("{}\t{}".format(item['creationTimestamp'],
                                  item['name']))
def delete_image(self, image_name):
    """Remove a GCE image, waiting for the delete operation to end."""
    log_green("Deleting image {}".format(image_name))
    op = self._compute.images().delete(project=self.project,
                                       image=image_name).execute()
    result = self._wait_until_done(op)
    log_yellow("Delete image returned status {}".format(result['status']))
def _create_server_ec2(connection,
                       region,
                       disk_name,
                       disk_size,
                       ami,
                       key_pair,
                       instance_type,
                       tags=None,
                       security_groups=None,
                       delete_on_termination=True,
                       log=False,
                       wait_for_ssh_available=True):
    """ Creates EC2 Instance """
    # FIX: `tags` defaulted to a mutable {}; None avoids the shared
    # mutable-default pitfall and behaves identically below.
    if log:
        log_green("Started...")
        log_yellow("...Creating EC2 instance...")

    # Describe the requested root EBS volume.
    ebs_volume = EBSBlockDeviceType()
    ebs_volume.size = disk_size
    bdm = BlockDeviceMapping()
    bdm[disk_name] = ebs_volume

    # get an ec2 ami image object with our choosen ami
    image = connection.get_all_images(ami)[0]

    # start a new instance
    reservation = image.run(1, 1,
                            key_name=key_pair,
                            security_groups=security_groups,
                            block_device_map=bdm,
                            instance_type=instance_type)

    # and get our instance_id
    instance = reservation.instances[0]

    # and loop and wait until ssh is available
    while instance.state == u'pending':
        if log:
            log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    if log:
        log_green("Instance state: %s" % instance.state)
    if wait_for_ssh_available:
        wait_for_ssh(instance.public_dns_name)

    # update the EBS volumes to be deleted on instance termination
    if delete_on_termination:
        # FIX: iterate the mapping keys directly; the unused value `bd`
        # from the original .items() loop is gone.
        for dev in instance.block_device_mapping:
            # "device=1" flags the volume for deletion with the instance
            instance.modify_attribute('BlockDeviceMapping',
                                      ["%s=%d" % (dev, 1)])

    # add a tag to our instance
    if tags:
        connection.create_tags([instance.id], tags)

    if log:
        log_green("Public dns: %s" % instance.public_dns_name)
    # returns our new instance
    return instance
def cache_docker_image_locally(docker_image, log=False):
    """Pull *docker_image* onto the host so it is locally cached."""
    if log:
        log_green('pulling docker image %s locally' % docker_image)
    sudo("docker pull %s" % docker_image)
def list_images(self):
    """Print creation time, name and id for each image we own."""
    log_yellow("creation time\timage_name\timage_id")
    mine = self.connection.get_all_images(owners='self')
    for img in sorted(mine, key=lambda i: i.creationDate):
        log_green("{}\t{:50}\t{}".format(img.creationDate,
                                         img.name,
                                         img.id))
def _create_server_ec2(connection,
                       region,
                       disk_name,
                       disk_size,
                       ami,
                       key_pair,
                       instance_type,
                       tags=None,
                       security_groups=None,
                       delete_on_termination=True,
                       log=False,
                       wait_for_ssh_available=True):
    """Create an EC2 instance and wait for it to come up.

    FIX: `tags` previously defaulted to a mutable {} (shared between
    calls); None is backward-compatible since falsy tags are skipped.
    """
    if log:
        log_green("Started...")
        log_yellow("...Creating EC2 instance...")

    # Root EBS volume of the requested size.
    ebs_volume = EBSBlockDeviceType()
    ebs_volume.size = disk_size
    bdm = BlockDeviceMapping()
    bdm[disk_name] = ebs_volume

    # get an ec2 ami image object with our choosen ami
    image = connection.get_all_images(ami)[0]

    # start a new instance
    reservation = image.run(1, 1,
                            key_name=key_pair,
                            security_groups=security_groups,
                            block_device_map=bdm,
                            instance_type=instance_type)

    # and get our instance_id
    instance = reservation.instances[0]

    # and loop and wait until ssh is available
    while instance.state == u'pending':
        if log:
            log_yellow("Instance state: %s" % instance.state)
        sleep(10)
        instance.update()
    if log:
        log_green("Instance state: %s" % instance.state)
    if wait_for_ssh_available:
        wait_for_ssh(instance.public_dns_name)

    # update the EBS volumes to be deleted on instance termination
    if delete_on_termination:
        # FIX: the original .items() loop bound an unused `bd` value.
        for dev in instance.block_device_mapping:
            instance.modify_attribute('BlockDeviceMapping',
                                      ["%s=%d" % (dev, 1)])

    # add a tag to our instance
    if tags:
        connection.create_tags([instance.id], tags)

    if log:
        log_green("Public dns: %s" % instance.public_dns_name)
    # returns our new instance
    return instance