def create_instance(connection, instance_name, config, key_name):
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            bdm[device] = BlockDeviceType(size=device_info['size'],
                                          delete_on_termination=True)

    if 'user_data_file' in config:
        log.debug("reading user_data from '%s'" % config['user_data_file'])
        user_data = open(config['user_data_file']).read()
        # assert that there are no values in need of formatting
        user_data = user_data.format()
    else:
        user_data = None

    subnet_id = random.choice(config.get('subnet_ids'))
    interface = NetworkInterfaceSpecification(
        subnet_id=subnet_id,
        delete_on_termination=True,
        groups=config.get('security_group_ids', []),
        associate_public_ip_address=config.get("use_public_ip"),
    )
    interfaces = NetworkInterfaceCollection(interface)

    reservation = connection.run_instances(
        image_id=config['ami'],
        key_name=key_name,
        instance_type=config['instance_type'],
        block_device_map=bdm,
        client_token=str(uuid.uuid4())[:16],
        disable_api_termination=bool(config.get('disable_api_termination')),
        user_data=user_data,
        instance_profile_name=config.get('instance_profile_name'),
        network_interfaces=interfaces,
    )

    instance = reservation.instances[0]
    instance.add_tag('Name', instance_name)
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    wait_for_status(instance, 'state', 'running', 'update')
    log.info("instance %s is running; waiting for shutdown", instance)
    wait_for_status(instance, 'state', 'stopped', 'update')
    log.info("clearing userData")
    instance.modify_attribute("userData", None)
    return instance
def attach_and_wait(host_instance, size, aws_dev_name, int_dev_name):
    v = host_instance.connection.create_volume(size, host_instance.placement)
    while True:
        try:
            v.attach(host_instance.id, aws_dev_name)
            break
        except:
            log.debug('waiting for volume to be attached')
            time.sleep(10)
    wait_for_status(v, "status", "in-use", "update")
    while True:
        try:
            if run('ls %s' % int_dev_name).succeeded:
                break
        except:
            log.debug('waiting for volume to appear', exc_info=True)
        time.sleep(10)
    return v
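# Example usage (a sketch, mirroring how create_ami() below calls this helper;
# `host_instance` and `config` are assumed to come from the caller):
#
#   v = attach_and_wait(host_instance, config['target']['size'],
#                       config['target']['aws_dev_name'],
#                       config['target']['int_dev_name'])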
def create_instance(name, config, region, key_name, ssh_key, instance_data,
                    deploypass, loaned_to, loan_bug, create_ami,
                    ignore_subnet_check, max_attempts):
    """Creates an instance with the given name and config. The config must
    specify things like the AMI id."""
    conn = get_aws_connection(region)
    # Make sure we don't request the same things twice
    token = str(uuid.uuid4())[:16]

    instance_data = instance_data.copy()
    instance_data['name'] = name
    instance_data['domain'] = config['domain']
    instance_data['hostname'] = '{name}.{domain}'.format(
        name=name, domain=config['domain'])

    ami = conn.get_all_images(image_ids=[config["ami"]])[0]
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            bd = BlockDeviceType()
            if device_info.get('size'):
                bd.size = device_info['size']
            # Overwrite root device size for HVM instances, since they cannot
            # be resized online
            if ami.virtualization_type == "hvm" and \
                    ami.root_device_name == device:
                bd.size = ami.block_device_mapping[ami.root_device_name].size
            if device_info.get("delete_on_termination") is not False:
                bd.delete_on_termination = True
            if device_info.get("ephemeral_name"):
                bd.ephemeral_name = device_info["ephemeral_name"]
            bdm[device] = bd

    interfaces = make_instance_interfaces(
        region, instance_data['hostname'], ignore_subnet_check,
        config.get('subnet_ids'), config.get('security_group_ids', []),
        config.get("use_public_ip"))

    keep_going, attempt = True, 1
    while keep_going:
        try:
            if 'user_data_file' in config:
                user_data = open(config['user_data_file']).read()
            else:
                user_data = get_user_data_tmpl(config['type'])
            if user_data:
                user_data = user_data.format(
                    puppet_server=instance_data.get('default_puppet_server'),
                    fqdn=instance_data['hostname'],
                    hostname=instance_data['name'],
                    domain=instance_data['domain'],
                    dns_search_domain=config.get('dns_search_domain'),
                    password=deploypass,
                    moz_instance_type=config['type'],
                    region_dns_atom=get_region_dns_atom(region),
                )

            reservation = conn.run_instances(
                image_id=config['ami'],
                key_name=key_name,
                instance_type=config['instance_type'],
                block_device_map=bdm,
                client_token=token,
                disable_api_termination=config.get('disable_api_termination'),
                user_data=user_data,
                instance_profile_name=config.get('instance_profile_name'),
                network_interfaces=interfaces,
            )
            break
        except boto.exception.BotoServerError:
            log.exception("Cannot start an instance")
            time.sleep(10)

        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance = reservation.instances[0]
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    wait_for_status(instance, "state", "running", "update")
    instance.add_tag('Name', name)
    instance.add_tag('FQDN', instance_data['hostname'])
    instance.add_tag('created', time.strftime("%Y-%m-%d %H:%M:%S %Z",
                                              time.gmtime()))
    instance.add_tag('moz-type', config['type'])
    if loaned_to:
        instance.add_tag("moz-loaned-to", loaned_to)
    if loan_bug:
        instance.add_tag("moz-bug", loan_bug)

    log.info("assimilating %s", instance)
    instance.add_tag('moz-state', 'pending')

    keep_going, attempt = True, 1
    while keep_going:
        try:
            # Don't reboot if we need to create an AMI
            reboot = not create_ami
            assimilate_instance(instance=instance, config=config,
                                ssh_key=ssh_key, instance_data=instance_data,
                                deploypass=deploypass, reboot=reboot)
            break
        except NetworkError as e:
            # It takes a while for the machine to start/reboot, so the
            # NetworkError exception is quite common; just log the error
            # without the full stack trace.
            log.warn("cannot connect; instance may still be starting "
                     "%s (%s, %s) - %s, retrying in %d sec ...",
                     instance_data['hostname'], instance.id,
                     instance.private_ip_address, e, FAILURE_TIMEOUT)
            time.sleep(FAILURE_TIMEOUT)
        except:
            # Any other exception
            log.warn("problem assimilating %s (%s, %s), retrying in "
                     "%d sec ...", instance_data['hostname'], instance.id,
                     instance.private_ip_address, FAILURE_TIMEOUT,
                     exc_info=True)
            time.sleep(FAILURE_TIMEOUT)

        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance.add_tag('moz-state', 'ready')

    if create_ami:
        ami_name = "spot-%s-%s" % (
            config['type'], time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
        log.info("Generating AMI %s", ami_name)
        ami_cleanup(mount_point="/", distro=config["distro"])
        root_bd = instance.block_device_mapping[instance.root_device_name]
        volume = instance.connection.get_all_volumes(
            volume_ids=[root_bd.volume_id])[0]
        # The instance has to be stopped to flush EBS caches
        instance.stop()
        wait_for_status(instance, 'state', 'stopped', 'update')
        ami = volume_to_ami(volume=volume, ami_name=ami_name,
                            arch=instance.architecture,
                            virtualization_type=instance.virtualization_type,
                            kernel_id=instance.kernel,
                            root_device_name=instance.root_device_name,
                            tags=config["tags"])
        log.info("AMI %s (%s) is ready", ami_name, ami.id)
        log.warn("Terminating %s", instance)
        instance.terminate()
def create_instance(name, config, region, key_name, ssh_key, instance_data,
                    deploypass, loaned_to, loan_bug, create_ami,
                    ignore_subnet_check, max_attempts):
    """Creates an instance with the given name and config. The config must
    specify things like the AMI id."""
    conn = get_aws_connection(region)
    # Make sure we don't request the same things twice
    token = str(uuid.uuid4())[:16]

    instance_data = instance_data.copy()
    instance_data['name'] = name
    instance_data['domain'] = config['domain']
    instance_data['hostname'] = '{name}.{domain}'.format(
        name=name, domain=config['domain'])

    ami = conn.get_all_images(image_ids=[config["ami"]])[0]
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            bd = BlockDeviceType()
            if device_info.get('size'):
                bd.size = device_info['size']
            # Overwrite root device size for HVM instances, since they cannot
            # be resized online
            if ami.virtualization_type == "hvm" and \
                    ami.root_device_name == device:
                bd.size = ami.block_device_mapping[ami.root_device_name].size
            if device_info.get("delete_on_termination") is not False:
                bd.delete_on_termination = True
            if device_info.get("ephemeral_name"):
                bd.ephemeral_name = device_info["ephemeral_name"]
            if device_info.get("volume_type"):
                bd.volume_type = device_info["volume_type"]
                if device_info["volume_type"] == "io1" \
                        and device_info.get("iops"):
                    bd.iops = device_info["iops"]
            bdm[device] = bd

    interfaces = make_instance_interfaces(
        region, instance_data['hostname'], ignore_subnet_check,
        config.get('subnet_ids'), config.get('security_group_ids', []),
        config.get("use_public_ip"))

    keep_going, attempt = True, 1
    while keep_going:
        try:
            puppet_master = pick_puppet_master(
                instance_data.get('puppet_masters'))
            user_data = user_data_from_template(config['type'], {
                "puppet_server": puppet_master,
                "fqdn": instance_data['hostname'],
                "hostname": instance_data['name'],
                "domain": instance_data['domain'],
                "dns_search_domain": config.get('dns_search_domain'),
                "password": deploypass,
                "moz_instance_type": config['type'],
                "region_dns_atom": get_region_dns_atom(region)})

            reservation = conn.run_instances(
                image_id=config['ami'],
                key_name=key_name,
                instance_type=config['instance_type'],
                block_device_map=bdm,
                client_token=token,
                disable_api_termination=config.get('disable_api_termination'),
                user_data=user_data,
                instance_profile_name=config.get('instance_profile_name'),
                network_interfaces=interfaces,
            )
            break
        except boto.exception.BotoServerError:
            log.exception("Cannot start an instance")
            time.sleep(10)

        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance = reservation.instances[0]
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    wait_for_status(instance, "state", "running", "update")
    instance.add_tag('Name', name)
    instance.add_tag('FQDN', instance_data['hostname'])
    instance.add_tag('created', time.strftime("%Y-%m-%d %H:%M:%S %Z",
                                              time.gmtime()))
    instance.add_tag('moz-type', config['type'])
    if loaned_to:
        instance.add_tag("moz-loaned-to", loaned_to)
    if loan_bug:
        instance.add_tag("moz-bug", loan_bug)

    log.info("assimilating %s", instance)
    instance.add_tag('moz-state', 'pending')

    keep_going, attempt = True, 1
    while keep_going:
        try:
            # Don't reboot if we need to create an AMI
            reboot = not create_ami
            assimilate_instance(instance=instance, config=config,
                                ssh_key=ssh_key, instance_data=instance_data,
                                deploypass=deploypass, reboot=reboot)
            break
        except NetworkError as e:
            # It takes a while for the machine to start/reboot, so the
            # NetworkError exception is quite common; just log the error
            # without the full stack trace.
            log.warn("cannot connect; instance may still be starting "
                     "%s (%s, %s) - %s, retrying in %d sec ...",
                     instance_data['hostname'], instance.id,
                     instance.private_ip_address, e, FAILURE_TIMEOUT)
            time.sleep(FAILURE_TIMEOUT)
        except:
            # Any other exception
            log.warn("problem assimilating %s (%s, %s), retrying in "
                     "%d sec ...", instance_data['hostname'], instance.id,
                     instance.private_ip_address, FAILURE_TIMEOUT,
                     exc_info=True)
            time.sleep(FAILURE_TIMEOUT)

        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance.add_tag('moz-state', 'ready')

    if create_ami:
        ami_name = "spot-%s-%s" % (
            config['type'], time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
        log.info("Generating AMI %s", ami_name)
        ami_cleanup(mount_point="/", distro=config["distro"])
        root_bd = instance.block_device_mapping[instance.root_device_name]
        volume = instance.connection.get_all_volumes(
            volume_ids=[root_bd.volume_id])[0]
        # The instance has to be stopped to flush EBS caches
        instance.stop()
        wait_for_status(instance, 'state', 'stopped', 'update')
        ami = volume_to_ami(volume=volume, ami_name=ami_name,
                            arch=instance.architecture,
                            virtualization_type=instance.virtualization_type,
                            kernel_id=instance.kernel,
                            root_device_name=instance.root_device_name,
                            tags=config["tags"])
        log.info("AMI %s (%s) is ready", ami_name, ami.id)
        log.warn("Terminating %s", instance)
        instance.terminate()
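# A minimal sketch (not from the original source) of the config shape that
# create_instance() above expects, inferred from the keys it reads; every
# value is an illustrative placeholder, not a real AMI, subnet, or group.
EXAMPLE_INSTANCE_CONFIG = {
    "ami": "ami-00000000",              # placeholder image id
    "instance_type": "m1.medium",
    "type": "example-type",             # used for the moz-type tag
    "domain": "example.com",
    "subnet_ids": ["subnet-00000000"],
    "security_group_ids": ["sg-00000000"],
    "use_public_ip": False,
    "disable_api_termination": False,
    "instance_profile_name": None,
    "dns_search_domain": "example.com",
    "device_map": {
        "/dev/xvda": {"size": 50, "delete_on_termination": True},
    },
}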
def create_ami(host_instance, args, config, instance_config, ssh_key,
               key_filename, instance_data, deploypass, cert, pkey,
               ami_name_prefix):
    connection = host_instance.connection
    setup_fabric_env(instance=host_instance, abort_on_prompts=True,
                     disable_known_hosts=True, key_filename=key_filename)

    target_name = args.config
    virtualization_type = config.get("virtualization_type")
    config_dir = "%s/%s" % (AMI_CONFIGS_DIR, target_name)
    if ami_name_prefix:
        prefix = ami_name_prefix
    else:
        prefix = args.config
    dated_target_name = "{}-{}".format(
        prefix, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
    if config.get('distro') in ('debian', 'ubuntu'):
        ubuntu_release = config.get("release", "precise")
    int_dev_name = config['target']['int_dev_name']
    mount_dev = int_dev_name
    grub_dev = int_dev_name
    mount_point = config['target']['mount_point']
    boot_mount_dev = None
    host_packages_file = os.path.join(config_dir, "host_packages")
    packages_file = os.path.join(config_dir, "packages")
    if os.path.exists(host_packages_file):
        install_packages(host_packages_file, config.get('distro'))

    v = attach_and_wait(host_instance, config['target']['size'],
                        config['target']['aws_dev_name'], int_dev_name)

    # Step 0: install required packages
    if config.get('distro') == "centos":
        run('which MAKEDEV >/dev/null || yum -d 1 install -y MAKEDEV')

    # Step 1: prepare target FS
    run('mkdir -p %s' % mount_point)
    if config.get("root_device_type") == "instance-store":
        # Use file image
        mount_dev = "/dev/cloud_root/lv_root"
        grub_dev = "/dev/loop0"
        boot_mount_dev = "/dev/mapper/loop0p1"
        img_file = dated_target_name
        partition_image(mount_dev=mount_dev, int_dev_name=int_dev_name,
                        img_file=img_file)
    elif virtualization_type == "hvm":
        # use EBS volume
        mount_dev = "/dev/cloud_root/lv_root"
        boot_mount_dev = "%s1" % int_dev_name
        partition_ebs_volume(int_dev_name=int_dev_name)

    run('/sbin/mkfs.{fs_type} {args} {dev}'.format(
        fs_type=config['target']['fs_type'],
        args=config['target'].get("mkfs_args", ""),
        dev=mount_dev))
    run('/sbin/e2label {dev} {label}'.format(
        dev=mount_dev, label=config['target']['e2_label']))
    run('mount {dev} {mount_point}'.format(dev=mount_dev,
                                           mount_point=mount_point))
    run('mkdir {0}/dev {0}/proc {0}/etc {0}/boot {0}/sys'.format(mount_point))
    run('mount -t sysfs sys %s/sys' % mount_point)
    if config.get('distro') not in ('debian', 'ubuntu'):
        run('mount -t proc proc %s/proc' % mount_point)
        run('for i in console null zero random urandom; '
            'do /sbin/MAKEDEV -d %s/dev -x $i ; done' % mount_point)
    if boot_mount_dev:
        run('mount {} {}/boot'.format(boot_mount_dev, mount_point))

    # Step 2: install base system
    if config.get('distro') in ('debian', 'ubuntu'):
        run("debootstrap %s %s "
            "http://puppet/repos/apt/ubuntu/" % (ubuntu_release, mount_point))
        run('chroot %s mount -t proc none /proc' % mount_point)
        run('mount -o bind /dev %s/dev' % mount_point)
        put('%s/releng-public-%s.list' % (AMI_CONFIGS_DIR, ubuntu_release),
            '%s/etc/apt/sources.list' % mount_point)
        with lcd(config_dir):
            put('usr/sbin/policy-rc.d', '%s/usr/sbin/' % mount_point,
                mirror_local_mode=True)
        install_packages(packages_file, config.get('distro'),
                         chroot=mount_point)
    else:
        with lcd(config_dir):
            put('etc/yum-local.cfg', '%s/etc/yum-local.cfg' % mount_point)
        yum = 'yum -d 1 -c {0}/etc/yum-local.cfg -y --installroot={0} '.format(
            mount_point)
        # this groupinstall emulates the %packages section of the kickstart
        # config, which defaults to Core and Base
        run('%s groupinstall Core Base' % yum)
        run('%s clean packages' % yum)
        # Rebuild RPM DB for cases when versions mismatch
        run('chroot %s rpmdb --rebuilddb || :' % mount_point)

    # Step 3: upload custom configuration files
    run('chroot %s mkdir -p /boot/grub' % mount_point)
    for directory in ('boot', 'etc', 'usr'):
        local_directory = os.path.join(config_dir, directory)
        remote_directory = os.path.join(mount_point, directory)
        if not os.path.exists(local_directory):
            # skip config dirs that don't exist locally
            continue
        sync(local_directory, remote_directory)

    # Step 4: tune configs
    run('sed -i -e s/@ROOT_DEV_LABEL@/{label}/g -e s/@FS_TYPE@/{fs}/g '
        '{mnt}/etc/fstab'.format(label=config['target']['e2_label'],
                                 fs=config['target']['fs_type'],
                                 mnt=mount_point))
    if config.get('distro') in ('debian', 'ubuntu'):
        if virtualization_type == "hvm":
            run("chroot {mnt} grub-install {int_dev_name}".format(
                mnt=mount_point, int_dev_name=int_dev_name))
            run("chroot {mnt} update-grub".format(mnt=mount_point))
        else:
            run("chroot {mnt} update-grub -y".format(mnt=mount_point))
            run("sed -i 's/^# groot.*/# groot=(hd0)/g' "
                "{mnt}/boot/grub/menu.lst".format(mnt=mount_point))
            run("chroot {mnt} update-grub".format(mnt=mount_point))
    else:
        run('ln -s grub.conf %s/boot/grub/menu.lst' % mount_point)
        run('ln -s ../boot/grub/grub.conf %s/etc/grub.conf' % mount_point)
        if config.get('kernel_package') == 'kernel-PAE':
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}.PAE" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package', 'kernel'),
                 mount_point))
        else:
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package', 'kernel'),
                 mount_point))
        if config.get("root_device_type") == "instance-store":
            # files normally copied by grub-install
            run("cp -va /usr/share/grub/x86_64-redhat/* /mnt/boot/grub/")
            put(os.path.join(config_dir, "grub.cmd"), "/tmp/grub.cmd")
            run("sed -i s/@IMG@/{}/g /tmp/grub.cmd".format(img_file))
            run("cat /tmp/grub.cmd | grub --device-map=/dev/null")
        elif virtualization_type == "hvm":
            # See https://bugs.archlinux.org/task/30241 for the details:
            # grub-install doesn't handle /dev/xvd* devices properly
            grub_install_patch = os.path.join(config_dir, "grub-install.diff")
            if os.path.exists(grub_install_patch):
                put(grub_install_patch, "/tmp/grub-install.diff")
                run('which patch >/dev/null || yum -d 1 install -y patch')
                run('patch -p0 -i /tmp/grub-install.diff /sbin/grub-install')
            run("grub-install --root-directory=%s --no-floppy %s" %
                (mount_point, grub_dev))

    run("sed -i -e '/PermitRootLogin/d' -e '/UseDNS/d' "
        "-e '$ a PermitRootLogin without-password' "
        "-e '$ a UseDNS no' "
        "%s/etc/ssh/sshd_config" % mount_point)

    if config.get('distro') in ('debian', 'ubuntu'):
        pass
    else:
        manage_service("network", mount_point, "on")
        manage_service("rc.local", mount_point, "on")

    if config.get("root_device_type") == "instance-store" and \
            config.get("distro") == "centos":
        instance_data = instance_data.copy()
        instance_data['name'] = host_instance.tags.get("Name")
        instance_data['hostname'] = host_instance.tags.get("FQDN")
        # make puppet happy
        run("cp /etc/resolv.conf {}/etc/resolv.conf".format(mount_point))
        # disable ipv6
        run("/sbin/service ip6tables stop")
        # mount /dev to let sshd start
        run('mount -o bind /dev %s/dev' % mount_point)
        assimilate_instance(host_instance, instance_config, ssh_key,
                            instance_data, deploypass, chroot=mount_point,
                            reboot=False)

    ami_cleanup(mount_point=mount_point, distro=config["distro"])
    # kill chroot processes
    put('%s/kill_chroot.sh' % AMI_CONFIGS_DIR, '/tmp/kill_chroot.sh')
    run('bash /tmp/kill_chroot.sh {}'.format(mount_point))
    run('swapoff -a')
    run('umount %s/dev || :' % mount_point)
    if config.get("distro") == "ubuntu":
        run('rm -f %s/usr/sbin/policy-rc.d' % mount_point)
        run('chroot %s ln -s /sbin/MAKEDEV /dev/' % mount_point)
        for dev in ('zero', 'null', 'console', 'generic'):
            run('chroot %s sh -c "cd /dev && ./MAKEDEV %s"' %
                (mount_point, dev))
    run('umount %s/sys || :' % mount_point)
    run('umount %s/proc || :' % mount_point)
    run('umount %s/dev || :' % mount_point)
    run('umount %s/boot || :' % mount_point)
    run('umount %s' % mount_point)

    if config.get("root_device_type") == "instance-store" \
            and config.get("distro") == "centos":
        # create bundle
        run("yum -d 1 install -y ruby "
            "http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.noarch.rpm")
        bundle_location = "{b}/{d}/{t}/{n}".format(
            b=config["bucket"], d=config["bucket_dir"],
            t=config["target"]["tags"]["moz-type"], n=dated_target_name)
        manifest_location = "{}/{}.manifest.xml".format(bundle_location,
                                                        dated_target_name)
        run("mkdir -p /mnt-tmp/out")
        put(cert, "/mnt-tmp/cert.pem")
        put(pkey, "/mnt-tmp/pk.pem")
        run("ec2-bundle-image -c /mnt-tmp/cert.pem -k /mnt-tmp/pk.pem "
            "-u {uid} -i /mnt-tmp/{img_file} -d /mnt-tmp/out -r x86_64".format(
                img_file=img_file, uid=config["aws_user_id"]))
        with hide('running', 'stdout', 'stderr'):
            log.info("uploading bundle")
            run("ec2-upload-bundle -b {bundle_location}"
                " --access-key {access_key} --secret-key {secret_key}"
                " --region {region}"
                " -m /mnt-tmp/out/{img_file}.manifest.xml --retry".format(
                    bundle_location=bundle_location,
                    access_key=boto.config.get("Credentials",
                                               "aws_access_key_id"),
                    secret_key=boto.config.get("Credentials",
                                               "aws_secret_access_key"),
                    region=connection.region.name,
                    img_file=img_file))

    v.detach(force=True)
    wait_for_status(v, "status", "available", "update")

    if not config.get("root_device_type") == "instance-store":
        # Step 5: Create a snapshot
        log.info('Creating a snapshot')
        snapshot = v.create_snapshot(dated_target_name)
        wait_for_status(snapshot, "status", "completed", "update")
        snapshot.add_tag('Name', dated_target_name)
        snapshot.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))

    # Step 6: Create an AMI
    log.info('Creating AMI')
    if config.get("root_device_type") == "instance-store":
        ami_id = connection.register_image(
            dated_target_name,
            '%s AMI' % dated_target_name,
            architecture=config['arch'],
            virtualization_type=virtualization_type,
            image_location=manifest_location,
        )
    else:
        host_img = connection.get_image(config['ami'])
        block_map = BlockDeviceMapping()
        block_map[host_img.root_device_name] = BlockDeviceType(
            snapshot_id=snapshot.id)
        root_device_name = host_img.root_device_name
        if virtualization_type == "hvm":
            kernel_id = None
            ramdisk_id = None
        else:
            kernel_id = host_img.kernel_id
            ramdisk_id = host_img.ramdisk_id
        ami_id = connection.register_image(
            dated_target_name,
            '%s AMI' % dated_target_name,
            architecture=config['arch'],
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            root_device_name=root_device_name,
            block_device_map=block_map,
            virtualization_type=virtualization_type,
        )
    while True:
        try:
            ami = connection.get_image(ami_id)
            ami.add_tag('Name', dated_target_name)
            ami.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))
            if config["target"].get("tags"):
                for tag, value in config["target"]["tags"].items():
                    log.info("Tagging %s: %s", tag, value)
                    ami.add_tag(tag, value)
            log.info('AMI created')
            log.info('ID: {id}, name: {name}'.format(id=ami.id,
                                                     name=ami.name))
            break
        except:
            log.info('Waiting for AMI')
            time.sleep(10)

    # Step 7: Cleanup
    if not args.keep_volume:
        log.info('Deleting volume')
        v.delete()
    if not args.keep_host_instance:
        log.info('Terminating host instance')
        host_instance.terminate()
    return ami
def create_ami(host_instance, options, config):
    connection = host_instance.connection
    env.host_string = host_instance.public_dns_name
    env.user = '******'
    env.abort_on_prompts = True
    env.disable_known_hosts = True

    target_name = options.config
    virtualization_type = config.get("virtualization_type")
    config_dir = "%s/%s" % (AMI_CONFIGS_DIR, target_name)
    dated_target_name = "%s-%s" % (
        options.config, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
    int_dev_name = config['target']['int_dev_name']
    mount_dev = int_dev_name
    mount_point = config['target']['mount_point']

    v = connection.create_volume(config['target']['size'],
                                 host_instance.placement)
    while True:
        try:
            v.attach(host_instance.id, config['target']['aws_dev_name'])
            break
        except:
            log.debug('hit error waiting for volume to be attached')
            time.sleep(10)
    wait_for_status(v, "status", "in-use", "update")
    while True:
        try:
            if run('ls %s' % int_dev_name).succeeded:
                break
        except:
            log.debug('hit error waiting for volume to be attached')
        time.sleep(10)

    # Step 0: install required packages
    if config.get('distro') not in ('debian', 'ubuntu'):
        run('which MAKEDEV >/dev/null || yum install -y MAKEDEV')

    # Step 1: prepare target FS
    run('mkdir -p %s' % mount_point)
    if virtualization_type == "hvm":
        # HVM-based instances use EBS disks as raw disks. They have to be
        # partitioned first. Additionally, "1" should be appended to get the
        # first primary device name.
        mount_dev = "%s1" % mount_dev
        run('parted -s %s -- mklabel msdos' % int_dev_name)
        run('parted -s -a optimal %s -- mkpart primary ext2 0 -1s' %
            int_dev_name)
        run('parted -s %s -- set 1 boot on' % int_dev_name)
    run('/sbin/mkfs.{fs_type} {dev}'.format(
        fs_type=config['target']['fs_type'], dev=mount_dev))
    run('/sbin/e2label {dev} {label}'.format(
        dev=mount_dev, label=config['target']['e2_label']))
    run('mount {dev} {mount_point}'.format(dev=mount_dev,
                                           mount_point=mount_point))
    run('mkdir {0}/dev {0}/proc {0}/etc'.format(mount_point))
    if config.get('distro') not in ('debian', 'ubuntu'):
        run('mount -t proc proc %s/proc' % mount_point)
        run('for i in console null zero ; '
            'do /sbin/MAKEDEV -d %s/dev -x $i ; done' % mount_point)

    # Step 2: install base system
    if config.get('distro') in ('debian', 'ubuntu'):
        run('apt-get update')
        run('which debootstrap >/dev/null || apt-get install -y debootstrap')
        run('debootstrap precise %s '
            'http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu/' %
            mount_point)
        run('chroot %s mount -t proc none /proc' % mount_point)
        run('mount -o bind /dev %s/dev' % mount_point)
        put('%s/releng-public.list' % AMI_CONFIGS_DIR,
            '%s/etc/apt/sources.list' % mount_point)
        with lcd(config_dir):
            put('usr/sbin/policy-rc.d', '%s/usr/sbin/' % mount_point,
                mirror_local_mode=True)
        run('chroot %s apt-get update' % mount_point)
        run('DEBIAN_FRONTEND=text chroot {mnt} apt-get install -y '
            'ubuntu-desktop openssh-server makedev curl grub {kernel}'.format(
                mnt=mount_point, kernel=config['kernel_package']))
        run('rm -f %s/usr/sbin/policy-rc.d' % mount_point)
        run('umount %s/dev' % mount_point)
        run('chroot %s ln -s /sbin/MAKEDEV /dev/' % mount_point)
        for dev in ('zero', 'null', 'console', 'generic'):
            run('chroot %s sh -c "cd /dev && ./MAKEDEV %s"' %
                (mount_point, dev))
        run('chroot %s apt-get clean' % mount_point)
    else:
        with lcd(config_dir):
            put('etc/yum-local.cfg', '%s/etc/yum-local.cfg' % mount_point)
            put('groupinstall', '/tmp/groupinstall')
            put('additional_packages', '/tmp/additional_packages')
        yum = 'yum -c {0}/etc/yum-local.cfg -y --installroot={0} '.format(
            mount_point)
        run('%s groupinstall "`cat /tmp/groupinstall`"' % yum)
        run('%s install `cat /tmp/additional_packages`' % yum)
        run('%s clean packages' % yum)
        # Rebuild RPM DB for cases when versions mismatch
        run('chroot %s rpmdb --rebuilddb || :' % mount_point)

    # Step 3: upload custom configuration files
    run('chroot %s mkdir -p /boot/grub' % mount_point)
    with lcd(config_dir):
        for f in ('etc/rc.local', 'etc/fstab', 'etc/hosts',
                  'etc/sysconfig/network',
                  'etc/sysconfig/network-scripts/ifcfg-eth0',
                  'etc/init.d/rc.local', 'boot/grub/device.map',
                  'etc/network/interfaces', 'boot/grub/menu.lst',
                  'boot/grub/grub.conf'):
            if os.path.exists(os.path.join(config_dir, f)):
                put(f, '%s/%s' % (mount_point, f), mirror_local_mode=True)
            else:
                log.warn("Skipping %s", f)

    # Step 4: tune configs
    run('sed -i -e s/@ROOT_DEV_LABEL@/{label}/g -e s/@FS_TYPE@/{fs}/g '
        '{mnt}/etc/fstab'.format(label=config['target']['e2_label'],
                                 fs=config['target']['fs_type'],
                                 mnt=mount_point))
    if config.get('distro') in ('debian', 'ubuntu'):
        # sanity check
        run('ls -l %s/boot/vmlinuz-%s' % (mount_point,
                                          config['kernel_version']))
        run('sed -i s/@VERSION@/%s/g %s/boot/grub/menu.lst' %
            (config['kernel_version'], mount_point))
    else:
        run('ln -s grub.conf %s/boot/grub/menu.lst' % mount_point)
        run('ln -s ../boot/grub/grub.conf %s/etc/grub.conf' % mount_point)
        if config.get('kernel_package') == 'kernel-PAE':
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}.PAE" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package', 'kernel'),
                 mount_point))
        else:
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package', 'kernel'),
                 mount_point))
        if virtualization_type == "hvm":
            # See https://bugs.archlinux.org/task/30241 for the details:
            # grub-install doesn't handle /dev/xvd* devices properly
            grub_install_patch = os.path.join(config_dir, "grub-install.diff")
            if os.path.exists(grub_install_patch):
                put(grub_install_patch, "/tmp/grub-install.diff")
                run('which patch >/dev/null || yum install -y patch')
                run('patch -p0 -i /tmp/grub-install.diff /sbin/grub-install')
            run("grub-install --root-directory=%s --no-floppy %s" %
                (mount_point, int_dev_name))

    run("sed -i -e '/PermitRootLogin/d' -e '/UseDNS/d' "
        "-e '$ a PermitRootLogin without-password' "
        "-e '$ a UseDNS no' "
        "%s/etc/ssh/sshd_config" % mount_point)

    if config.get('distro') in ('debian', 'ubuntu'):
        pass
    else:
        manage_service("network", mount_point, "on")
        manage_service("rc.local", mount_point, "on")

    run('umount %s/proc || :' % mount_point)
    run('umount %s' % mount_point)

    v.detach()
    wait_for_status(v, "status", "available", "update")

    # Step 5: Create a snapshot
    log.info('Creating a snapshot')
    snapshot = v.create_snapshot('EBS-backed %s' % dated_target_name)
    wait_for_status(snapshot, "status", "completed", "update")
    snapshot.add_tag('Name', dated_target_name)

    # Step 6: Create an AMI
    log.info('Creating AMI')
    host_img = connection.get_image(config['ami'])
    block_map = BlockDeviceMapping()
    block_map[host_img.root_device_name] = BlockDeviceType(
        snapshot_id=snapshot.id)
    if virtualization_type == "hvm":
        kernel_id = None
        ramdisk_id = None
    else:
        kernel_id = host_img.kernel_id
        ramdisk_id = host_img.ramdisk_id
    ami_id = connection.register_image(
        dated_target_name,
        '%s EBS AMI' % dated_target_name,
        architecture=config['arch'],
        kernel_id=kernel_id,
        ramdisk_id=ramdisk_id,
        root_device_name=host_img.root_device_name,
        block_device_map=block_map,
        virtualization_type=virtualization_type,
    )
    while True:
        try:
            ami = connection.get_image(ami_id)
            ami.add_tag('Name', dated_target_name)
            if config["target"].get("tags"):
                for tag, value in config["target"]["tags"].items():
                    log.info("Tagging %s: %s", tag, value)
                    ami.add_tag(tag, value)
            log.info('AMI created')
            log.info('ID: {id}, name: {name}'.format(id=ami.id,
                                                     name=ami.name))
            break
        except:
            log.info('Waiting for AMI')
            time.sleep(10)

    # Step 7: Cleanup
    if not options.keep_volume:
        log.info('Deleting volume')
        v.delete()
    if not options.keep_host_instance:
        log.info('Terminating host instance')
        host_instance.terminate()
    return ami
def create_ami(host_instance, args, config, instance_config, ssh_key,
               key_filename, instance_data, deploypass, cert, pkey,
               ami_name_prefix):
    connection = host_instance.connection
    setup_fabric_env(instance=host_instance, abort_on_prompts=True,
                     disable_known_hosts=True, key_filename=key_filename)

    target_name = args.config
    virtualization_type = config.get("virtualization_type")
    config_dir = "%s/%s" % (AMI_CONFIGS_DIR, target_name)
    if ami_name_prefix:
        prefix = ami_name_prefix
    else:
        prefix = args.config
    dated_target_name = "{}-{}".format(
        prefix, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
    int_dev_name = config['target']['int_dev_name']
    mount_dev = int_dev_name
    grub_dev = int_dev_name
    mount_point = config['target']['mount_point']
    boot_mount_dev = None
    host_packages_file = os.path.join(config_dir, "host_packages")
    packages_file = os.path.join(config_dir, "packages")
    if os.path.exists(host_packages_file):
        install_packages(host_packages_file, config.get('distro'))

    v = attach_and_wait(host_instance, config['target']['size'],
                        config['target']['aws_dev_name'], int_dev_name)

    # Step 0: install required packages
    if config.get('distro') == "centos":
        run('which MAKEDEV >/dev/null || yum -d 1 install -y MAKEDEV')

    # Step 1: prepare target FS
    run('mkdir -p %s' % mount_point)
    if config.get("root_device_type") == "instance-store":
        # Use file image
        mount_dev = "/dev/cloud_root/lv_root"
        grub_dev = "/dev/loop0"
        boot_mount_dev = "/dev/mapper/loop0p1"
        img_file = dated_target_name
        partition_image(mount_dev=mount_dev, int_dev_name=int_dev_name,
                        img_file=img_file)
    elif virtualization_type == "hvm":
        # use EBS volume
        mount_dev = "/dev/cloud_root/lv_root"
        boot_mount_dev = "%s1" % int_dev_name
        partition_ebs_volume(int_dev_name=int_dev_name)

    run('/sbin/mkfs.{fs_type} {args} {dev}'.format(
        fs_type=config['target']['fs_type'],
        args=config['target'].get("mkfs_args", ""),
        dev=mount_dev))
    run('/sbin/e2label {dev} {label}'.format(
        dev=mount_dev, label=config['target']['e2_label']))
    run('mount {dev} {mount_point}'.format(dev=mount_dev,
                                           mount_point=mount_point))
    run('mkdir {0}/dev {0}/proc {0}/etc {0}/boot {0}/sys'.format(mount_point))
    run('mount -t sysfs sys %s/sys' % mount_point)
    if config.get('distro') not in ('debian', 'ubuntu'):
        run('mount -t proc proc %s/proc' % mount_point)
        run('for i in console null zero random urandom; '
            'do /sbin/MAKEDEV -d %s/dev -x $i ; done' % mount_point)
    if boot_mount_dev:
        run('mount {} {}/boot'.format(boot_mount_dev, mount_point))

    # Step 2: install base system
    if config.get('distro') in ('debian', 'ubuntu'):
        run("debootstrap precise %s "
            "http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu/" %
            mount_point)
        run('chroot %s mount -t proc none /proc' % mount_point)
        run('mount -o bind /dev %s/dev' % mount_point)
        put('%s/releng-public.list' % AMI_CONFIGS_DIR,
            '%s/etc/apt/sources.list' % mount_point)
        with lcd(config_dir):
            put('usr/sbin/policy-rc.d', '%s/usr/sbin/' % mount_point,
                mirror_local_mode=True)
        install_packages(packages_file, config.get('distro'),
                         chroot=mount_point)
    else:
        with lcd(config_dir):
            put('etc/yum-local.cfg', '%s/etc/yum-local.cfg' % mount_point)
        yum = 'yum -d 1 -c {0}/etc/yum-local.cfg -y --installroot={0} '.format(
            mount_point)
        # this groupinstall emulates the %packages section of the kickstart
        # config, which defaults to Core and Base
        run('%s groupinstall Core Base' % yum)
        run('%s clean packages' % yum)
        # Rebuild RPM DB for cases when versions mismatch
        run('chroot %s rpmdb --rebuilddb || :' % mount_point)

    # Step 3: upload custom configuration files
    run('chroot %s mkdir -p /boot/grub' % mount_point)
    for directory in ('boot', 'etc', 'usr'):
        local_directory = os.path.join(config_dir, directory)
        remote_directory = os.path.join(mount_point, directory)
        if not os.path.exists(local_directory):
            # skip config dirs that don't exist locally
            continue
        sync(local_directory, remote_directory)

    # Step 4: tune configs
    run('sed -i -e s/@ROOT_DEV_LABEL@/{label}/g -e s/@FS_TYPE@/{fs}/g '
        '{mnt}/etc/fstab'.format(label=config['target']['e2_label'],
                                 fs=config['target']['fs_type'],
                                 mnt=mount_point))
    if config.get('distro') in ('debian', 'ubuntu'):
        if virtualization_type == "hvm":
            run("chroot {mnt} grub-install {int_dev_name}".format(
                mnt=mount_point, int_dev_name=int_dev_name))
            run("chroot {mnt} update-grub".format(mnt=mount_point))
        else:
            run("chroot {mnt} update-grub -y".format(mnt=mount_point))
            run("sed -i 's/^# groot.*/# groot=(hd0)/g' "
                "{mnt}/boot/grub/menu.lst".format(mnt=mount_point))
            run("chroot {mnt} update-grub".format(mnt=mount_point))
    else:
        run('ln -s grub.conf %s/boot/grub/menu.lst' % mount_point)
        run('ln -s ../boot/grub/grub.conf %s/etc/grub.conf' % mount_point)
        if config.get('kernel_package') == 'kernel-PAE':
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}.PAE" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package', 'kernel'),
                 mount_point))
        else:
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package', 'kernel'),
                 mount_point))
        if config.get("root_device_type") == "instance-store":
            # files normally copied by grub-install
            run("cp -va /usr/share/grub/x86_64-redhat/* /mnt/boot/grub/")
            put(os.path.join(config_dir, "grub.cmd"), "/tmp/grub.cmd")
            run("sed -i s/@IMG@/{}/g /tmp/grub.cmd".format(img_file))
            run("cat /tmp/grub.cmd | grub --device-map=/dev/null")
        elif virtualization_type == "hvm":
            # See https://bugs.archlinux.org/task/30241 for the details:
            # grub-install doesn't handle /dev/xvd* devices properly
            grub_install_patch = os.path.join(config_dir, "grub-install.diff")
            if os.path.exists(grub_install_patch):
                put(grub_install_patch, "/tmp/grub-install.diff")
                run('which patch >/dev/null || yum -d 1 install -y patch')
                run('patch -p0 -i /tmp/grub-install.diff /sbin/grub-install')
            run("grub-install --root-directory=%s --no-floppy %s" %
                (mount_point, grub_dev))

    run("sed -i -e '/PermitRootLogin/d' -e '/UseDNS/d' "
        "-e '$ a PermitRootLogin without-password' "
        "-e '$ a UseDNS no' "
        "%s/etc/ssh/sshd_config" % mount_point)

    if config.get('distro') in ('debian', 'ubuntu'):
        pass
    else:
        manage_service("network", mount_point, "on")
        manage_service("rc.local", mount_point, "on")

    if config.get("root_device_type") == "instance-store" and \
            config.get("distro") == "centos":
        instance_data = instance_data.copy()
        instance_data['name'] = host_instance.tags.get("Name")
        instance_data['hostname'] = host_instance.tags.get("FQDN")
        # make puppet happy
        run("cp /etc/resolv.conf {}/etc/resolv.conf".format(mount_point))
        # disable ipv6
        run("/sbin/service ip6tables stop")
        # mount /dev to let sshd start
        run('mount -o bind /dev %s/dev' % mount_point)
        assimilate_instance(host_instance, instance_config, ssh_key,
                            instance_data, deploypass, chroot=mount_point,
                            reboot=False)

    ami_cleanup(mount_point=mount_point, distro=config["distro"])
    # kill chroot processes
    put('%s/kill_chroot.sh' % AMI_CONFIGS_DIR, '/tmp/kill_chroot.sh')
    run('bash /tmp/kill_chroot.sh {}'.format(mount_point))
    run('swapoff -a')
    run('umount %s/dev || :' % mount_point)
    if config.get("distro") == "ubuntu":
        run('rm -f %s/usr/sbin/policy-rc.d' % mount_point)
        run('chroot %s ln -s /sbin/MAKEDEV /dev/' % mount_point)
        for dev in ('zero', 'null', 'console', 'generic'):
            run('chroot %s sh -c "cd /dev && ./MAKEDEV %s"' %
                (mount_point, dev))
    run('umount %s/sys || :' % mount_point)
    run('umount %s/proc || :' % mount_point)
    run('umount %s/dev || :' % mount_point)
    run('umount %s/boot || :' % mount_point)
    run('umount %s' % mount_point)

    if config.get("root_device_type") == "instance-store" \
            and config.get("distro") == "centos":
        # create bundle
        run("yum -d 1 install -y ruby "
            "http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.noarch.rpm")
        bundle_location = "{b}/{d}/{t}/{n}".format(
            b=config["bucket"], d=config["bucket_dir"],
            t=config["target"]["tags"]["moz-type"], n=dated_target_name)
        manifest_location = "{}/{}.manifest.xml".format(bundle_location,
                                                        dated_target_name)
        run("mkdir -p /mnt-tmp/out")
        put(cert, "/mnt-tmp/cert.pem")
        put(pkey, "/mnt-tmp/pk.pem")
        run("ec2-bundle-image -c /mnt-tmp/cert.pem -k /mnt-tmp/pk.pem "
            "-u {uid} -i /mnt-tmp/{img_file} -d /mnt-tmp/out -r x86_64".format(
                img_file=img_file, uid=config["aws_user_id"]))
        with hide('running', 'stdout', 'stderr'):
            log.info("uploading bundle")
            run("ec2-upload-bundle -b {bundle_location}"
                " --access-key {access_key} --secret-key {secret_key}"
                " --region {region}"
                " -m /mnt-tmp/out/{img_file}.manifest.xml --retry".format(
                    bundle_location=bundle_location,
                    access_key=boto.config.get("Credentials",
                                               "aws_access_key_id"),
                    secret_key=boto.config.get("Credentials",
                                               "aws_secret_access_key"),
                    region=connection.region.name,
                    img_file=img_file))

    v.detach(force=True)
    wait_for_status(v, "status", "available", "update")

    if not config.get("root_device_type") == "instance-store":
        # Step 5: Create a snapshot
        log.info('Creating a snapshot')
        snapshot = v.create_snapshot(dated_target_name)
        wait_for_status(snapshot, "status", "completed", "update")
        snapshot.add_tag('Name', dated_target_name)
        snapshot.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))

    # Step 6: Create an AMI
    log.info('Creating AMI')
    if config.get("root_device_type") == "instance-store":
        ami_id = connection.register_image(
            dated_target_name,
            '%s AMI' % dated_target_name,
            architecture=config['arch'],
            virtualization_type=virtualization_type,
            image_location=manifest_location,
        )
    else:
        host_img = connection.get_image(config['ami'])
        block_map = BlockDeviceMapping()
        block_map[host_img.root_device_name] = BlockDeviceType(
            snapshot_id=snapshot.id)
        root_device_name = host_img.root_device_name
        if virtualization_type == "hvm":
            kernel_id = None
            ramdisk_id = None
        else:
            kernel_id = host_img.kernel_id
            ramdisk_id = host_img.ramdisk_id
        ami_id = connection.register_image(
            dated_target_name,
            '%s AMI' % dated_target_name,
            architecture=config['arch'],
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            root_device_name=root_device_name,
            block_device_map=block_map,
            virtualization_type=virtualization_type,
        )
    while True:
        try:
            ami = connection.get_image(ami_id)
            ami.add_tag('Name', dated_target_name)
            ami.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))
            if config["target"].get("tags"):
                for tag, value in config["target"]["tags"].items():
                    log.info("Tagging %s: %s", tag, value)
                    ami.add_tag(tag, value)
            log.info('AMI created')
            log.info('ID: {id}, name: {name}'.format(id=ami.id,
                                                     name=ami.name))
            break
        except:
            log.info('Waiting for AMI')
            time.sleep(10)

    # Step 7: Cleanup
    if not args.keep_volume:
        log.info('Deleting volume')
        v.delete()
    if not args.keep_host_instance:
        log.info('Terminating host instance')
        host_instance.terminate()
    return ami
def create_instance(name, config, region, key_name, instance_data, deploypass,
                    loaned_to, loan_bug):
    """Creates an instance with the given name and config. The config must
    specify things like the AMI id."""
    conn = get_aws_connection(region)
    vpc = get_vpc(region)
    # Make sure we don't request the same things twice
    token = str(uuid.uuid4())[:16]

    instance_data = instance_data.copy()
    instance_data['name'] = name
    instance_data['hostname'] = '{name}.{domain}'.format(
        name=name, domain=config['domain'])

    ami = conn.get_all_images(image_ids=[config["ami"]])[0]
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            bd = BlockDeviceType()
            if device_info.get('size'):
                bd.size = device_info['size']
            # Overwrite root device size for HVM instances, since they cannot
            # be resized online
            if ami.virtualization_type == "hvm" and \
                    ami.root_device_name == device:
                bd.size = ami.block_device_mapping[ami.root_device_name].size
            if device_info.get("delete_on_termination") is not False:
                bd.delete_on_termination = True
            if device_info.get("ephemeral_name"):
                bd.ephemeral_name = device_info["ephemeral_name"]
            bdm[device] = bd

    ip_address = get_ip(instance_data['hostname'])
    subnet_id = None
    if ip_address:
        s_id = get_subnet_id(vpc, ip_address)
        if s_id in config['subnet_ids']:
            if ip_available(conn, ip_address):
                subnet_id = s_id
            else:
                log.warning("%s already assigned" % ip_address)

    if not ip_address or not subnet_id:
        ip_address = None
        subnet_id = choice(config.get('subnet_ids'))
    interface = NetworkInterfaceSpecification(
        subnet_id=subnet_id,
        private_ip_address=ip_address,
        delete_on_termination=True,
        groups=config.get('security_group_ids', []),
        associate_public_ip_address=config.get("use_public_ip"),
    )
    interfaces = NetworkInterfaceCollection(interface)

    while True:
        try:
            reservation = conn.run_instances(
                image_id=config['ami'],
                key_name=key_name,
                instance_type=config['instance_type'],
                block_device_map=bdm,
                client_token=token,
                disable_api_termination=bool(
                    config.get('disable_api_termination')),
                network_interfaces=interfaces,
                instance_profile_name=config.get("instance_profile_name"),
            )
            break
        except boto.exception.BotoServerError:
            log.exception("Cannot start an instance")
            time.sleep(10)

    instance = reservation.instances[0]
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    wait_for_status(instance, "state", "running", "update")
    instance.add_tag('Name', name)
    instance.add_tag('FQDN', instance_data['hostname'])
    instance.add_tag('created', time.strftime("%Y-%m-%d %H:%M:%S %Z",
                                              time.gmtime()))
    instance.add_tag('moz-type', config['type'])
    if loaned_to:
        instance.add_tag("moz-loaned-to", loaned_to)
    if loan_bug:
        instance.add_tag("moz-bug", loan_bug)

    log.info("assimilating %s", instance)
    instance.add_tag('moz-state', 'pending')
    while True:
        try:
            assimilate(instance.private_ip_address, config, instance_data,
                       deploypass)
            break
        except:
            log.warn("problem assimilating %s (%s), retrying in 10 sec ...",
                     instance_data['hostname'], instance.id)
            time.sleep(10)
    instance.add_tag('moz-state', 'ready')
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-r", "--region", dest="region", required=True,
                        help="Region")
    parser.add_argument("-q", "--quiet", action="store_true",
                        help="Suppress logging messages")
    parser.add_argument("-c", "--ami-config", required=True,
                        help="AMI config")
    parser.add_argument("-i", "--instance-config", required=True,
                        help="Instance config")
    parser.add_argument("--ssh-key", required=True, help="SSH key name")
    parser.add_argument("--user", help="Login name")
    parser.add_argument("--public", action="store_true", default=False,
                        help="Generate a public AMI (no secrets)")

    args = parser.parse_args()

    try:
        ami_config = json.load(
            open("%s/%s.json" % (AMI_CONFIGS_DIR, args.ami_config))
        )[args.region]
        moz_type_config = json.load(
            open("%s/%s" % (INSTANCE_CONFIGS_DIR, args.instance_config))
        )[args.region]
    except KeyError:
        parser.error("unknown configuration")

    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
    if not args.quiet:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.ERROR)

    conn = get_aws_connection(args.region)
    dated_target_name = "spot-%s-%s" % (
        args.ami_config, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
    filters = {
        "tag:moz-state": "ready",
        "instance-state-name": "stopped"
    }
    for tag, value in moz_type_config["tags"].iteritems():
        filters["tag:%s" % tag] = value

    using_stopped_instance = True
    res = conn.get_all_instances(filters=filters)
    if not res:
        filters["instance-state-name"] = "running"
        res = conn.get_all_instances(filters=filters)
        using_stopped_instance = False
    instances = reduce(lambda a, b: a + b, [r.instances for r in res])
    # skip loaned instances
    instances = [i for i in instances if not i.tags.get("moz-loaned-to")]
    i = sorted(instances, key=lambda i: i.launch_time)[-1]
    log.debug("Selected instance to clone: %s", i)
    v_id = i.block_device_mapping[i.root_device_name].volume_id
    v = conn.get_all_volumes(volume_ids=[v_id])[0]
    snap1 = v.create_snapshot("temporary snapshot of %s" % v_id)
    wait_for_status(snap1, "status", "completed", "update")

    host_instance = run_instance(
        connection=conn, instance_name="tmp", config=ami_config,
        key_name=args.ssh_key, user=args.user,
        subnet_id=random.choice(moz_type_config["subnet_ids"]))

    env.host_string = host_instance.private_ip_address
    env.user = '******'
    env.abort_on_prompts = True
    env.disable_known_hosts = True

    int_dev_name = ami_config['target']['int_dev_name']
    mount_dev = int_dev_name
    mount_point = ami_config['target']['mount_point']
    virtualization_type = ami_config.get("virtualization_type")
    if virtualization_type == "hvm":
        mount_dev = "%s1" % mount_dev

    tmp_v = conn.create_volume(size=snap1.volume_size,
                               zone=host_instance.placement,
                               snapshot=snap1)
    wait_for_status(tmp_v, "status", "available", "update")
    while True:
        try:
            tmp_v.attach(host_instance.id,
                         ami_config['target']['aws_dev_name'])
            break
        except:
            log.debug('hit error waiting for volume to be attached')
            time.sleep(10)
    while True:
        try:
            tmp_v.update()
            if tmp_v.status == 'in-use':
                if run('ls %s' % int_dev_name).succeeded:
                    break
        except:
            log.debug('hit error waiting for volume to be attached')
        time.sleep(10)

    run('mkdir -p %s' % mount_point)
    run('mount {dev} {mount_point}'.format(dev=mount_dev,
                                           mount_point=mount_point))
    with cd(mount_point):
        run("rm -f root/*.sh")
        run("rm -f root/*.log")
        run("rm -f root/userdata")
        run("rm -f root/*.done")
        run("rm -f etc/spot_setup.done")
        run("rm -f var/lib/puppet/ssl/private_keys/*")
        run("rm -f var/lib/puppet/ssl/certs/*")
        if not using_stopped_instance or args.public:
            run("rm -rf builds/slave")
        else:
            run("rm -f builds/slave/buildbot.tac")
        run("echo localhost > etc/hostname")
        run("sed -i -e 's/127.0.0.1.*/127.0.0.1 localhost/g' etc/hosts")
        if args.public:
            # put rc.local
            put("%s/%s/etc/rc.local" % (AMI_CONFIGS_DIR, args.ami_config),
                "etc/rc.local", mirror_local_mode=True)
            run("rm -rf home/cltbld/.ssh")
            run("rm -rf root/.ssh/*")
            run("rm -rf builds/gapi.data")
            run("rm -rf builds/mock_mozilla/*/root/home/mock_mozilla")
        else:
            put("%s/spot_setup.sh" % AMI_CONFIGS_DIR,
                "etc/spot_setup.sh", mirror_local_mode=True)
            # replace puppet init with our script
            if ami_config["distro"] == "ubuntu":
                put("%s/spot_setup.conf" % AMI_CONFIGS_DIR,
                    "etc/init/puppet.conf", mirror_local_mode=True)
            else:
                run("echo '/etc/spot_setup.sh' > etc/init.d/puppet")

    # create snapshot2
    log.info('Terminating %s', host_instance)
    host_instance.terminate()
    wait_for_status(tmp_v, "status", "available", "update")
    log.info('Creating a snapshot')
    snap2 = tmp_v.create_snapshot(dated_target_name)
    wait_for_status(snap2, "status", "completed", "update")
    snap2.add_tag("Name", dated_target_name)

    bdm = BlockDeviceMapping()
    bdm[i.root_device_name] = BlockDeviceType(snapshot_id=snap2.id)

    log.info('Creating AMI')
    if virtualization_type == "hvm":
        kernel_id = None
    else:
        kernel_id = i.kernel
    ami_id = conn.register_image(
        dated_target_name,
        dated_target_name,
        architecture=ami_config["arch"],
        kernel_id=kernel_id,
        root_device_name=i.root_device_name,
        block_device_map=bdm,
        virtualization_type=virtualization_type,
    )
    log.info('Waiting...')
    while True:
        try:
            ami = conn.get_image(ami_id)
            ami.add_tag('Name', dated_target_name)
            ami.add_tag('moz-created', int(time.mktime(time.gmtime())))
            for tag, value in moz_type_config["tags"].iteritems():
                ami.add_tag(tag, value)
            log.info('AMI created')
            log.info('ID: {id}, name: {name}'.format(id=ami.id,
                                                     name=ami.name))
            break
        except:
            log.info('Waiting for AMI')
            time.sleep(10)

    # Cleanup
    log.info('Cleanup...')
    tmp_v.delete()
    snap1.delete()