Code Example #1
File: test_blockdevicemapping.py Project: 2mind/boto
class BlockDeviceTypeTests(unittest.TestCase):
    def setUp(self):
        self.block_device_type = BlockDeviceType()

    def check_that_attribute_has_been_set(self, name, value, attribute):
        self.block_device_type.endElement(name, value, None)
        self.assertEqual(getattr(self.block_device_type, attribute), value)

    def test_endElement_sets_correct_attributes_with_values(self):
        for arguments in [("volumeId", 1, "volume_id"),
                          ("virtualName", "some name", "ephemeral_name"),
                          ("snapshotId", 1, "snapshot_id"),
                          ("volumeSize", 1, "size"),
                          ("status", "some status", "status"),
                          ("attachTime", 1, "attach_time"),
                          ("somethingRandom", "somethingRandom", "somethingRandom")]:
            self.check_that_attribute_has_been_set(arguments[0], arguments[1], arguments[2])

    def test_endElement_with_name_NoDevice_value_true(self):
        self.block_device_type.endElement("NoDevice", 'true', None)
        self.assertEqual(self.block_device_type.no_device, True)

    def test_endElement_with_name_NoDevice_value_other(self):
        self.block_device_type.endElement("NoDevice", 'something else', None)
        self.assertEqual(self.block_device_type.no_device, False)

    def test_endElement_with_name_deleteOnTermination_value_true(self):
        self.block_device_type.endElement("deleteOnTermination", "true", None)
        self.assertEqual(self.block_device_type.delete_on_termination, True)

    def test_endElement_with_name_deleteOnTermination_value_other(self):
        self.block_device_type.endElement("deleteOnTermination", 'something else', None)
        self.assertEqual(self.block_device_type.delete_on_termination, False)
Code Example #2
File: ec2_helper.py Project: ICRAR/aws-chiles02
    def build_block_device_map(ephemeral, number_ephemeral_disks=1, ebs_size=None, iops=None, number_ebs_volumes=1):
        bdm = blockdevicemapping.BlockDeviceMapping()

        if ephemeral:
            # The ephemeral disk
            xvdb = BlockDeviceType()
            xvdb.ephemeral_name = 'ephemeral0'
            bdm['/dev/xvdb'] = xvdb

            if number_ephemeral_disks == 2:
                xvdc = BlockDeviceType()
                xvdc.ephemeral_name = 'ephemeral1'
                bdm['/dev/xvdc'] = xvdc

        if ebs_size:
            for disks in range(0, number_ebs_volumes):
                xvd_n = blockdevicemapping.EBSBlockDeviceType(delete_on_termination=True)
                xvd_n.size = int(ebs_size)  # size in Gigabytes
                if iops:
                    xvd_n.iops = 500
                    xvd_n.volume_type = 'io1'
                else:
                    xvd_n.volume_type = 'gp2'
                last_char = chr(ord('f') + disks)
                bdm['/dev/xvd' + last_char] = xvd_n

        return bdm
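Note: the mapping returned above is consumed by boto's run_instances via its block_device_map keyword. A minimal, hypothetical usage sketch (the connection object, AMI id and key name are placeholders, not part of the original project):

# Hypothetical usage; 'conn' is an existing boto.ec2 connection object.
bdm = build_block_device_map(ephemeral=True, number_ephemeral_disks=2, ebs_size=100)
reservation = conn.run_instances(
    'ami-xxxxxxxx',
    key_name='my-key',
    instance_type='m3.xlarge',
    block_device_map=bdm)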
Code Example #3
File: snapshots.py Project: aliceh/eucaconsole
 def snapshot_register(self):
     snapshot_id = self.snapshot.id
     name = self.request.params.get('name')
     description = self.request.params.get('description')
     dot = self.request.params.get('dot')
     reg_as_windows = self.request.params.get('reg_as_windows')
     root_vol = BlockDeviceType(snapshot_id=snapshot_id)
     root_vol.delete_on_termination = dot
     bdm = BlockDeviceMapping()
     root_device_name = '/dev/sda' if self.cloud_type == 'euca' else '/dev/sda1'
     bdm[root_device_name] = root_vol
     location = self.request.route_path('snapshot_view', id=snapshot_id)
     if self.snapshot and self.register_form.validate():
         with boto_error_handler(self.request, location):
             self.log_request(_(u"Registering snapshot {0} as image {1}").format(snapshot_id, name))
             self.snapshot.connection.register_image(
                 name=name, description=description,
                 root_device_name=root_device_name,
                 kernel_id=('windows' if reg_as_windows else None),
                 block_device_map=bdm)
             prefix = _(u'Successfully registered snapshot')
             msg = u'{prefix} {id}'.format(prefix=prefix, id=snapshot_id)
             # Clear images cache
             self.invalidate_images_cache()
             self.request.session.flash(msg, queue=Notification.SUCCESS)
         return HTTPFound(location=location)
     return self.render_dict
Code Example #4
File: ec2ops.py Project: anthony198/eutester
 def register_snapshot_by_id( self, snap_id, rdn="/dev/sda1", description="bfebs", windows=False, bdmdev=None, name=None, ramdisk=None, kernel=None, dot=True ):
     '''
     Register an image snapshot
     snap_id        (mandatory string) snapshot id
     name           (optional string) name of image to be registered (defaults to "bfebs_" + snap_id)
     description    (optional string) description of image to be registered
     bdmdev         (optional string) block-device-mapping device for image
     rdn            (optional string) root-device-name for image
     dot            (optional boolean) Delete On Terminate boolean
     windows        (optional boolean) Is windows image boolean
     kernel         (optional string) kernel id (note: for windows images this should be "windows")
     ramdisk        (optional string) ramdisk id for image
     '''
     
     if (bdmdev is None):
         bdmdev=rdn
     if (name is None):
         name="bfebs_"+ snap_id
     if ( windows is True ) and ( kernel is not None):
         kernel="windows"     
         
     bdmap = BlockDeviceMapping()
     block_dev_type = BlockDeviceType()
     block_dev_type.snapshot_id = snap_id
     block_dev_type.delete_on_termination = dot
     bdmap[bdmdev] = block_dev_type
         
     self.debug("Register image with: snap_id:"+str(snap_id)+", rdn:"+str(rdn)+", desc:"+str(description)+", windows:"+str(windows)+", bdname:"+str(bdmdev)+", name:"+str(name)+", ramdisk:"+str(ramdisk)+", kernel:"+str(kernel))
     image_id = self.ec2.register_image(name=name, description=description, kernel_id=kernel, ramdisk_id=ramdisk, block_device_map=bdmap, root_device_name=rdn)
     self.debug("Image now registered as " + image_id)
     return image_id
Code Example #5
def run_instance(conn, ami_id, key_name, instance_type, 
        sec_group, zone="us-east-1d", vol_size=None):
    """
    @param conn: python boto connection
    @param ami_id: AMI ID
    @param key_name: SSH key name
    @param instance_type: instance type, example 'm1.large'
    @param sec_group: security group
    @param zone: optional, defaults to 'us-east-1d'
    @param vol_size: optional integer, if specified will change size of root volume to this size
    
    @return boto.ec2.instance.Instance
    """
    bdm = None
    if vol_size:
        # Create block device mapping info
        dev_sda1 = BlockDeviceType()
        dev_sda1.size = int(vol_size)
        dev_sda1.delete_on_termination = True
        bdm = BlockDeviceMapping()
        bdm['/dev/sda1'] = dev_sda1

    # Run instance
    reservation = conn.run_instances(
            ami_id,
            key_name=key_name,
            instance_type=instance_type,
            placement=zone,
            instance_initiated_shutdown_behavior="stop",
            security_groups=[sec_group],
            block_device_map=bdm)
    return reservation.instances[0]
Code Example #6
def test_create_launch_configuration_with_block_device_mappings():
    block_device_mapping = BlockDeviceMapping()

    ephemeral_drive = BlockDeviceType()
    ephemeral_drive.ephemeral_name = 'ephemeral0'
    block_device_mapping['/dev/xvdb'] = ephemeral_drive

    snapshot_drive = BlockDeviceType()
    snapshot_drive.snapshot_id = "snap-1234abcd"
    snapshot_drive.volume_type = "standard"
    block_device_mapping['/dev/xvdp'] = snapshot_drive

    ebs_drive = BlockDeviceType()
    ebs_drive.volume_type = "io1"
    ebs_drive.size = 100
    ebs_drive.iops = 1000
    ebs_drive.delete_on_termination = False
    block_device_mapping['/dev/xvdh'] = ebs_drive

    conn = boto.connect_autoscale(use_block_device_types=True)
    config = LaunchConfiguration(
        name='tester',
        image_id='ami-abcd1234',
        instance_type='m1.small',
        key_name='the_keys',
        security_groups=["default", "default2"],
        user_data="This is some user_data",
        instance_monitoring=True,
        instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing',
        spot_price=0.1,
        block_device_mappings=[block_device_mapping]
    )
    conn.create_launch_configuration(config)

    launch_config = conn.get_all_launch_configurations()[0]
    launch_config.name.should.equal('tester')
    launch_config.image_id.should.equal('ami-abcd1234')
    launch_config.instance_type.should.equal('m1.small')
    launch_config.key_name.should.equal('the_keys')
    set(launch_config.security_groups).should.equal(set(['default', 'default2']))
    launch_config.user_data.should.equal("This is some user_data")
    launch_config.instance_monitoring.enabled.should.equal('true')
    launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing')
    launch_config.spot_price.should.equal(0.1)
    len(launch_config.block_device_mappings).should.equal(3)

    returned_mapping = launch_config.block_device_mappings

    set(returned_mapping.keys()).should.equal(set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh']))

    returned_mapping['/dev/xvdh'].iops.should.equal(1000)
    returned_mapping['/dev/xvdh'].size.should.equal(100)
    returned_mapping['/dev/xvdh'].volume_type.should.equal("io1")
    returned_mapping['/dev/xvdh'].delete_on_termination.should.be.false

    returned_mapping['/dev/xvdp'].snapshot_id.should.equal("snap-1234abcd")
    returned_mapping['/dev/xvdp'].volume_type.should.equal("standard")

    returned_mapping['/dev/xvdb'].ephemeral_name.should.equal('ephemeral0')
Code Example #7
def create_mapping(config):
    if 'mapping' not in config:
        return None
    mapping = BlockDeviceMapping()
    for ephemeral_name, device_path in config['mapping'].iteritems():
        ephemeral = BlockDeviceType()
        ephemeral.ephemeral_name = ephemeral_name
        mapping[device_path] = ephemeral
    return mapping
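create_mapping reads config['mapping'] as ephemeral-name/device-path pairs. A hypothetical config dict illustrating the shape the loop expects:

config = {
    'mapping': {
        'ephemeral0': '/dev/xvdb',  # ephemeral name -> device path
        'ephemeral1': '/dev/xvdc',
    }
}
mapping = create_mapping(config)  # BlockDeviceMapping with two ephemeral entries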
Code Example #8
File: makeami.py Project: fbacchella/oscmd
 def vol2ami(self, volume_id, name, description=None):
     snap = self.ctxt.cnx_ec2.create_snapshot(volume_id, 'boto snapshot for %s' % name)
     block_map = BlockDeviceMapping() 
     sda = BlockDeviceType() 
     sda.snapshot_id = snap.id
     sda.ebs = True
     root_device_name = '/dev/sda1'
     block_map[root_device_name] = sda
     return self.ctxt.cnx_ec2.register_image(name=name, architecture='x86_64', root_device_name=root_device_name, block_device_map=block_map, description=description)
Code Example #9
File: functions.py Project: joshwilliams/autobench
def spotrequest(ec2, nodetype, count, testname, testdate, threads, userdata = None):
    global selected_availability_zone

    maxprice = "0.40"
    if nodetype == 'client':
        instancetype = 'c3.xlarge'
    else:
        # For data nodes
        instancetype = 'c3.4xlarge'

    # Allow an explicit selection if needed...
    #selected_availability_zone = "us-east-1e"
    if not selected_availability_zone:
        selected_availability_zone = random.choice([
            'us-east-1a',
            #'us-east-1b',
            'us-east-1d',
            'us-east-1e',
        ])
    availability_zone = selected_availability_zone

    if userdata == None:
        userdata = """#!/bin/bash
echo {0} > /etc/node_testname
echo {1} > /etc/node_testdate
echo {2} > /etc/node_threads
echo {3} > /etc/node_role
#echo 10.136.71.116 > /etc/node_headnode
echo 400 > /etc/node_swap             # MB of swap created
echo 1 > /etc/node_mongo_uselocal     # Use local mongos shard server on each client
""".format(testname, testdate, threads, nodetype)

    # For some tests we may not need any nodes of this type
    if count == 0:
        return []

    # Default AMI
    ami = 'ami-XXXXXXXX' # Current versions

    # Specify ephemeral block devices...

    bdmap = BlockDeviceMapping()
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    bdmap['/dev/sdb'] = sdb
    sdc = BlockDeviceType()
    sdc.ephemeral_name = 'ephemeral1'
    bdmap['/dev/sdc'] = sdc
    #sdd = BlockDeviceType()
    #sdd.ephemeral_name = 'ephemeral2'
    #bdmap['/dev/sdd'] = sdd
    #sde = BlockDeviceType()
    #sde.ephemeral_name = 'ephemeral3'
    #bdmap['/dev/sde'] = sde

    return ec2.request_spot_instances(maxprice, ami, count=count, launch_group=testdate, availability_zone_group=testdate, security_groups=['epstatic'], user_data=userdata, instance_type=instancetype, block_device_map=bdmap)
Code Example #10
def _create_device_map(ephemeral_disk_count):
  """Creates a block device out of the ephemeral disks on this instance."""
  device_map = BlockDeviceMapping()
  device_paths = _get_device_paths(ephemeral_disk_count)

  for index, device_path in enumerate(device_paths):
    device = BlockDeviceType()
    device.ephemeral_name = "ephemeral{}".format(index)
    device_map[device_path] = device

  return device_map
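_get_device_paths is not shown in this example. A plausible sketch, assuming it simply yields one device path per ephemeral disk starting at /dev/sdb (an assumption, not the project's actual helper):

import string

def _get_device_paths(ephemeral_disk_count):
  """Hypothetical helper: one device path per ephemeral disk, starting at /dev/sdb."""
  return ["/dev/sd" + letter
          for letter in string.ascii_lowercase[1:1 + ephemeral_disk_count]]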
Code Example #11
    def __call__(self, config, sectionname):
        from boto.ec2.blockdevicemapping import BlockDeviceMapping
        from boto.ec2.blockdevicemapping import BlockDeviceType

        value = BaseMassager.__call__(self, config, sectionname)
        device_map = BlockDeviceMapping()
        for mapping in value.split():
            device_path, ephemeral_name = mapping.split(':')
            device = BlockDeviceType()
            device.ephemeral_name = ephemeral_name
            device_map[device_path] = device
        return device_map
Code Example #12
File: cli.py Project: spulec/autoscaler
def _parse_block_device_mappings(user_input):
    """
    Parse block device mappings per AWS CLI tools syntax (modified to add IOPS)

    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html

    Syntax:
    /dev/xvd[a-z]=[snapshot-id|ephemeral]:[size in GB]:[Delete on Term]:[IOPS]
    - Leave inapplicable fields blank
    - Delete on Termination defaults to True
    - IOPS limits are not validated
    - EBS sizing is not validated

    Mount an Ephemeral Drive:
    /dev/xvdb1=ephemeral0

    Mount multiple Ephemeral Drives:
    /dev/xvdb1=ephemeral0,/dev/xvdb2=ephemeral1

    Mount a Snapshot:
    /dev/xvdp=snap-1234abcd

    Mount a Snapshot to a 100GB drive:
    /dev/xvdp=snap-1234abcd:100

    Mount a Snapshot to a 100GB drive and do not delete on termination:
    /dev/xvdp=snap-1234abcd:100:false

    Mount a Fresh 100GB EBS device
    /dev/xvdp=:100

    Mount a Fresh 100GB EBS Device and do not delete on termination:
    /dev/xvdp=:100:false

    Mount a Fresh 100GB EBS Device with 1000 IOPS
    /dev/xvdp=:100::1000
    """
    block_device_map = BlockDeviceMapping()
    mappings = user_input.split(",")
    for mapping in mappings:
        block_type = BlockDeviceType()
        mount_point, drive_type, size, delete, iops = _parse_drive_mapping(mapping)
        if 'ephemeral' in drive_type:
            block_type.ephemeral_name = drive_type
        elif 'snap' in drive_type:
            block_type.snapshot_id = drive_type
            block_type.volume_type = "standard"
        else:
            block_type.volume_type = "standard"
        block_type.size = size
        block_type.delete_on_termination = delete

        if iops:
            block_type.iops = iops
            block_type.volume_type = "io1"

        block_device_map[mount_point] = block_type
    return block_device_map
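The helper _parse_drive_mapping is not included in the excerpt. A minimal sketch of what it might look like, derived only from the syntax documented in the docstring above (an assumption, not the project's actual implementation):

def _parse_drive_mapping(mapping):
    """Hypothetical helper: split one '/dev/xvd?=type:size:delete:iops' entry
    into (mount_point, drive_type, size, delete, iops)."""
    mount_point, _, spec = mapping.partition('=')
    fields = spec.split(':')
    fields += [''] * (4 - len(fields))           # pad the optional fields
    drive_type, size, delete, iops = fields[:4]
    delete = delete.lower() != 'false'           # Delete on Termination defaults to True
    return mount_point, drive_type, size or None, delete, iops or None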
Code Example #13
File: __init__.py Project: codysoyland/shaker
 def launch_instance(self):
     if not self.verify_settings():
         return
     block_map = BlockDeviceMapping()
     root_device = self.config["ec2_root_device"]
     block_map[root_device] = EBSBlockDeviceType()
     if self.config["ec2_size"]:
         block_map[root_device].size = self.config["ec2_size"]
     block_map[root_device].delete_on_termination = True
     for num, device_location in enumerate(self.config["ec2_ephemeral_devices"]):
         device = BlockDeviceType()
         device.ephemeral_name = "ephemeral%d" % num
         block_map[device_location] = device
     reservation = self.conn.run_instances(
         self.config["ec2_ami_id"],
         key_name=self.config["ec2_key_name"],
         security_groups=self.config["ec2_security_groups"] or [self.config["ec2_security_group"]],
         instance_type=self.config["ec2_instance_type"],
         placement=self.config["ec2_zone"],
         monitoring_enabled=self.config["ec2_monitoring_enabled"],
         block_device_map=block_map,
         user_data=self.user_data,
     )
     self.instance = reservation.instances[0]
     secs = RUN_INSTANCE_TIMEOUT
     rest_interval = 5
     while secs and not self.instance.state == "running":
         time.sleep(rest_interval)
         secs = secs - rest_interval
         try:
             self.instance.update()
         except boto.exception.EC2ResponseError:
             pass
     if secs <= 0:
         errmsg = "run instance %s failed after %d seconds" % (self.instance.id, RUN_INSTANCE_TIMEOUT)
         LOG.error(errmsg)
     else:
         if self.config["hostname"]:
             self.assign_name_tag()
         msg1 = "Started Instance: {0}\n".format(self.instance.id)
         LOG.info(msg1)
         print msg1
         p = int(self.config["ssh_port"])
         port = "-p {0} ".format(p) if p and not p == 22 else ""
         ## change user to 'root' for all non-Ubuntu systems
         user = self.config["sudouser"] if self.config["sudouser"] and self.config["ssh_import"] else "ubuntu"
         # XXX - TODO: replace public dns with fqdn, where appropriate
         msg2 = "To access: ssh {0}{1}@{2}\n" "To terminate: shaker-terminate {3}".format(
             port, user, self.instance.public_dns_name, self.instance.id
         )
         LOG.info(msg2)
         print msg2
Code Example #14
File: functions.py Project: joshwilliams/autobench
def demandrequest(ec2, nodetype, testname, testdate, threads, userdata = None):
    global selected_availability_zone

    if nodetype == 'client':
        instancetype = 'c3.xlarge'
    else:
        instancetype = 'i2.xlarge'

    # Allow an explicit selection if needed...
    #selected_availability_zone = 'us-east-1e'
    if not selected_availability_zone:
        selected_availability_zone = random.choice([
            'us-east-1a',
            #'us-east-1b',
            'us-east-1d',
            'us-east-1e',
        ])
    availability_zone = selected_availability_zone

    if userdata == None:
        userdata = """#!/bin/bash
echo {0} > /etc/node_testname
echo {1} > /etc/node_testdate
echo {2} > /etc/node_threads
echo {3} > /etc/node_role
echo 10.136.71.116 > /etc/node_headnode
echo 400 > /etc/node_swap             # MB of swap created
echo 1 > /etc/node_mongo_uselocal     # Use local mongos shard server on each client
""".format(testname, testdate, threads, nodetype)

    # Default AMI
    ami = 'ami-XXXXXXXX' # Current versions

    # Specify ephemeral block devices...

    bdmap = BlockDeviceMapping()
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    bdmap['/dev/sdb'] = sdb
    #sdc = BlockDeviceType()
    #sdc.ephemeral_name = 'ephemeral1'
    #bdmap['/dev/sdc'] = sdc
    #sdd = BlockDeviceType()
    #sdd.ephemeral_name = 'ephemeral2'
    #bdmap['/dev/sdd'] = sdd
    #sde = BlockDeviceType()
    #sde.ephemeral_name = 'ephemeral3'
    #bdmap['/dev/sde'] = sde

    return ec2.run_instances(ami, placement=availability_zone, security_groups=['epstatic'], user_data=userdata, instance_type=instancetype, block_device_map=bdmap)
Code Example #15
File: ec2_funcs.py Project: cengjing/cloudly
def clone_instance(instance):
    
	new_bdm = None
	ec2 = instance.connection

	if instance.block_device_mapping:
	
		root_device_name = instance.get_attribute('rootDeviceName')['rootDeviceName']
		user_data = instance.get_attribute('userData')['userData']
		# user_data comes back base64 encoded.  Need to decode it so it
		# can get re-encoded by run_instance !
		user_data = base64.b64decode(user_data)
		new_bdm = BlockDeviceMapping()

		for dev in instance.block_device_mapping:

			# if this entry is about the root device, skip it
			if dev != root_device_name:

				bdt = instance.block_device_mapping[dev]

				if bdt.volume_id:

					volume = ec2.get_all_volumes([bdt.volume_id])[0]
					snaps = volume.snapshots()

					if len(snaps) == 0:

						print 'No snapshots available for %s' % volume.id
					else:

						# sort the list of snapshots, newest is at the end now
						snaps.sort(key=lambda snap: snap.start_time)
						latest_snap = snaps[-1]
						new_bdt = BlockDeviceType()
						new_bdt.snapshot_id = latest_snap.id
						new_bdm[dev] = new_bdt

	return ec2.run_instances(
		instance.image_id,
		key_name=instance.key_name,
		security_groups=[g.name for g in instance.groups],
		user_data=user_data,
		instance_type=instance.instance_type,
		kernel_id=instance.kernel,
		ramdisk_id=instance.ramdisk,
		monitoring_enabled=instance.monitored,
		placement=instance.placement,
		block_device_map=new_bdm
	).instances[0]
Code Example #16
File: openstack_ec2.py Project: RedHatQE/dva
 def _get_bmap(self, params):
     bmap = BlockDeviceMapping()
     for device in params['bmap']:
         if not 'name' in device.keys():
             self.logger.debug('bad device ' + str(device))
             continue
         dev = BlockDeviceType()
         if 'size' in device.keys():
             dev.size = device['size']
         if 'delete_on_termination' in device.keys():
             dev.delete_on_termination = device['delete_on_termination']
         if 'ephemeral_name' in device.keys():
             dev.ephemeral_name = device['ephemeral_name']
         bmap[device['name']] = dev
     return bmap
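params['bmap'] is read as a list of per-device dicts. A hypothetical value showing the keys the loop looks for:

params = {
    'bmap': [
        {'name': '/dev/vda', 'size': 20, 'delete_on_termination': True},
        {'name': '/dev/vdb', 'ephemeral_name': 'ephemeral0'},
        {'size': 10},  # no 'name' key: logged as a bad device and skipped
    ]
}
# inside the class: bmap = self._get_bmap(params)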
Code Example #17
File: awsProvisioner.py Project: brainstorm/toil
    def _getBlockDeviceMapping(cls, instanceType, rootVolSize=50):
        # determine number of ephemeral drives via cgcloud-lib
        bdtKeys = ['', '/dev/xvdb', '/dev/xvdc', '/dev/xvdd']
        bdm = BlockDeviceMapping()
        # Change root volume size to allow for bigger Docker instances
        root_vol = BlockDeviceType(delete_on_termination=True)
        root_vol.size = rootVolSize
        bdm["/dev/xvda"] = root_vol
        # the first disk is already attached for us so start with 2nd.
        for disk in xrange(1, instanceType.disks + 1):
            bdm[bdtKeys[disk]] = BlockDeviceType(
                ephemeral_name='ephemeral{}'.format(disk - 1))  # ephemeral counts start at 0

        logger.debug('Device mapping: %s', bdm)
        return bdm
Code Example #18
File: fabfile.py Project: 2mind/aws-provisioning
def _get_block_device_mapping(device_name, size):
    """ Returns a block device mapping object for the specified device and size.

    Block Device Mapping is used to associate a device on the VM with an EBS Volume.

    parameters:
    device_name -- The name of the device in the VM, such as /dev/sda1, /dev/sdb1. etc
    size -- The amount of space to allocate for the EBS drive.

    """
    block_device = BlockDeviceType()
    block_device.size = size
    bdm = BlockDeviceMapping()
    bdm[device_name] = block_device

    return bdm
Code Example #19
File: awshelpers.py Project: Ramblurr/ephemvpn
def _get_block_device_mapping(device_name, size, delete_on_terminate = False):
    """ Returns a block device mapping object for the specified device and size.

    Block Device Mapping is used to associate a device on the VM with an EBS Volume.

    parameters:
    device_name -- The name of the device in the VM, such as /dev/sda1, /dev/sdb1. etc
    size -- The amount of space to allocate for the EBS drive.
    delete_on_terminate -- Whether the volume should be deleted when the instance is terminated

    """
    block_device = BlockDeviceType(delete_on_termination=delete_on_terminate)
    block_device.size = size
    bdm = BlockDeviceMapping()
    bdm[device_name] = block_device

    return bdm
Code Example #20
def get_block_device(instance_type, ebs_vol_size):
    block_map = BlockDeviceMapping()

    if ebs_vol_size > 0:
        device = EBSBlockDeviceType()
        device.size = ebs_vol_size
        device.delete_on_termination = True
        block_map['/dev/sdv'] = device

    for i in range(get_num_disks(instance_type)):
        dev = BlockDeviceType()
        dev.ephemeral_name = 'ephemeral%d' % i
        # The first ephemeral drive is /dev/sdb.
        name = '/dev/sd' + string.ascii_letters[i + 1]
        block_map[name] = dev

    return block_map
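get_num_disks is not shown here. A hedged stand-in that maps instance types to their local (instance-store) disk counts; the tiny table is illustrative only, not an authoritative list:

# Hypothetical stand-in for get_num_disks(); a real implementation keeps a
# full instance-type -> local-disk-count table.
_DISKS_BY_INSTANCE_TYPE = {
    'm1.small': 1,
    'm1.large': 2,
    'c1.xlarge': 4,
    'i2.xlarge': 1,
}

def get_num_disks(instance_type):
    return _DISKS_BY_INSTANCE_TYPE.get(instance_type, 1)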
Code Example #21
def create_block_device_mapping(ami, device_map):
    bdm = BlockDeviceMapping()
    for device, device_info in device_map.items():
        if ami.root_device_type == "instance-store" and \
                not device_info.get("ephemeral_name"):
            # EBS is not supported by S3-backed AMIs at request time
            # EBS volumes can be attached when an instance is running
            continue
        bd = BlockDeviceType()
        if device_info.get('size'):
            bd.size = device_info['size']
        if ami.root_device_name == device:
            ami_size = ami.block_device_mapping[device].size
            if ami.virtualization_type == "hvm":
                # Overwrite root device size for HVM instances, since they
                # cannot be resized online
                bd.size = ami_size
            elif device_info.get('size'):
                # make sure that size is enough for this AMI
                assert ami_size <= device_info['size'], \
                    "Instance root device size cannot be smaller than AMI " \
                    "root device"
        if device_info.get("delete_on_termination") is not False:
            bd.delete_on_termination = True
        if device_info.get("ephemeral_name"):
            bd.ephemeral_name = device_info["ephemeral_name"]
        if device_info.get("volume_type"):
            bd.volume_type = device_info["volume_type"]
            if device_info["volume_type"] == "io1" \
                    and device_info.get("iops"):
                bd.iops = device_info["iops"]

        bdm[device] = bd
    return bdm
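create_block_device_mapping expects device_map to be keyed by device path, each value holding the optional per-device settings the loop reads. A hypothetical example:

device_map = {
    '/dev/sda1': {'size': 50, 'delete_on_termination': True},
    '/dev/xvdb': {'ephemeral_name': 'ephemeral0'},
    '/dev/xvdf': {'size': 200, 'volume_type': 'io1', 'iops': 1000},
}
bdm = create_block_device_mapping(ami, device_map)  # 'ami' is a boto Image object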
Code Example #22
File: ec2.py Project: zentoo/metro
    def start_remote(self):
        self.get_bootstrap_kernel()
        self.get_bootstrap_image()

        # create EBS volume for /mnt/gentoo
        device = BlockDeviceType()
        device.size = self.settings["ec2/instance/device/size"]
        device.delete_on_termination = True

        mapping = BlockDeviceMapping()
        self.root_device = "/dev/" + self.settings["ec2/instance/device/name"]
        mapping[self.root_device] = device

        # start bootstrapping instance
        reservation = self.ec2.run_instances(self.bootstrap_image.id,
                kernel_id=self.bootstrap_kernel.id,
                instance_type=self.settings["ec2/instance/type"],
                security_groups=[self.name],
                key_name=self.name,
                block_device_map=mapping)

        self.instance = reservation.instances[0]

        sys.stdout.write("waiting for instance to come up ..")
        while self.instance.update() != 'running':
            sys.stdout.write(".")
            sys.stdout.flush()
            time.sleep(5)
        sys.stdout.write("\n")
        time.sleep(120)

        self.ssh_uri = "ec2-user@" + self.instance.public_dns_name
        self.ssh_port = "22"
        self.remote_upload_path = "/tmp"

        # enable sudo without a tty
        cmd = "sudo sed -i -e '/requiretty/d' /etc/sudoers"
        cmd = ["ssh", "-t"] + self.ssh_options() + [self.ssh_uri, cmd]
        ssh = subprocess.Popen(cmd)
        ssh.wait()

        self.run_script_at_remote("steps/remote/postboot")
Code Example #23
File: fabfile.py Project: sleekslush/Admiral
def launch(name, ami='ami-3d4ff254', instance_type='t1.micro', key_name='amazon2', 
           zone='us-east-1d', security_group='quicklaunch-1', user='******', job=None):
    '''Launch a single instance of the provided ami '''
    conn = EC2Connection()
    # Declare the block device mapping for ephemeral disks
    mapping = BlockDeviceMapping()
    eph0 = BlockDeviceType()
    eph1 = BlockDeviceType()
    eph0.ephemeral_name = 'ephemeral0'
    eph1.ephemeral_name = 'ephemeral1'
    mapping['/dev/sdb'] = eph0
    mapping['/dev/sdc'] = eph1
    # Now, ask for a reservation
    reservation = conn.run_instances(ami, instance_type=instance_type, 
                                     key_name=key_name, placement=zone, 
                                     block_device_map=mapping, security_groups=[security_group])
    # And assume that the instance we're talking about is the first in the list
    # This is not always a good assumption, and will likely depend on the specifics
    # of your launching situation. For launching an isolated instance while no
    # other actions are taking place, this is sufficient.
    instance = reservation.instances[0]
    print('Waiting for instance to start...')
    # Check up on its status every so often
    status = instance.update()
    while status == 'pending':
        time.sleep(5)
        status = instance.update()
    if status == 'running':
        print('New instance "' + instance.id + '" accessible at ' + instance.public_dns_name)
        # Name the instance
        conn.create_tags([instance.id], {'Name': name})

        n = Node(name, instance.id, instance.image_id, instance.key_name, instance.placement,
                instance.instance_type, instance.dns_name, instance.private_dns_name,
                instance.ip_address, instance.private_ip_address, user, job)
    
        pprint.pprint(n.to_dict())
        addNode(n)
    
    else:
        print('Instance status: ' + status)
        return
Code Example #24
def start_instances(ec2cxn, instance_count, image_id, use_ephemeral = False, instance_type="c1.xlarge"):
    
    print "Attempting to start ", instance_count, " instances of image: ", image_id     
    
    if use_ephemeral:
        dev_map = BlockDeviceMapping()
        sdb1 = BlockDeviceType()
        sdb1.ephemeral_name = 'ephemeral0'
        dev_map['/dev/sdb1'] = sdb1
        reservation = ec2cxn.run_instances(
          image_id, min_count=1, 
          max_count=instance_count, 
          block_device_map=dev_map, 
          security_groups=SECURITY_GROUPS, 
          key_name="capk", 
          instance_type=instance_type)
    else:
        reservation = ec2cxn.run_instances(
          image_id, 
           min_count=1, 
           max_count=instance_count, 
           security_groups=SECURITY_GROUPS, 
           key_name="capk", 
           instance_type=instance_type)

    instances = reservation.instances
    # never leave instances running at the end of the script
    def kill_instances():
      for instance in instances:
        instance.update()
        if instance.state != 'terminated':
          print "Killing ", instance 
          instance.terminate()
    atexit.register(kill_instances)

    print "Started ", instance_count, " instances"
    for i in instances:
        print "  =>", i.id
    if len(instances) != instance_count:
      print "Expected %d instances, got %d" % (instance_count, len(instances))
       
    return instances
Code Example #25
def run_instance(conn, ami_id, key_name, instance_type, sec_group, zone, vol_size=None):
    bdm = None
    if vol_size:
        # Create block device mapping info
        dev_sda1 = BlockDeviceType()
        dev_sda1.size = int(vol_size)
        dev_sda1.delete_on_termination = True
        bdm = BlockDeviceMapping()
        bdm["/dev/sda1"] = dev_sda1

    # Run instance
    reservation = conn.run_instances(
        ami_id,
        key_name=key_name,
        instance_type=instance_type,
        placement=zone,
        instance_initiated_shutdown_behavior="stop",
        security_groups=[sec_group],
        block_device_map=bdm,
    )
    return reservation.instances[0]
Code Example #26
File: ec2.py Project: dalehamel/metro
    def capture(self):
        volume = self.ec2.get_all_volumes(filters={
            'attachment.instance-id': self.instance.id,
            'attachment.device': self.root_device,
        })[0]

        snapshot = self.ec2.create_snapshot(volume.id)

        sys.stdout.write("waiting for snapshot to complete ..")
        while snapshot.status != 'completed':
            sys.stdout.write(".")
            sys.stdout.flush()
            time.sleep(5)
            snapshot.update()
        sys.stdout.write("\n")

        # create EBS mapping
        device = BlockDeviceType()
        device.snapshot_id = snapshot.id

        mapping = BlockDeviceMapping()
        mapping['/dev/sda'] = device

        self.get_instance_kernel()
        image = self.ec2.register_image(name=self.name, description=self.name,
                architecture=self.arch, kernel_id=self.instance_kernel.id,
                root_device_name='/dev/sda', block_device_map=mapping)

        if self.settings["target/permission"] == "public":
            self.ec2.modify_image_attribute(image, groups='all')

        with open(self.settings["path/mirror/target"], "w") as fd:
            cmd = [
                "ec2-run-instances",
                "--region", self.region,
                "--instance-type", "t1.micro",
                image,
            ]
            fd.write(" ".join(cmd))
            fd.write("\n")
Code Example #27
File: eucacommand.py Project: honnix/euca2ools
 def parse_block_device_args(self, block_device_maps_args):
     block_device_map = BlockDeviceMapping()
     for block_device_map_arg in block_device_maps_args:
         parts = block_device_map_arg.split('=')
         if len(parts) > 1:
             device_name = parts[0]
             block_dev_type = BlockDeviceType()
             value_parts = parts[1].split(':')
             if value_parts[0].startswith('snap'):
                 block_dev_type.snapshot_id = value_parts[0]
             else:
                 if value_parts[0].startswith('ephemeral'):
                     block_dev_type.ephemeral_name = value_parts[0]
             if len(value_parts) > 1:
                 try:
                     block_dev_type.size = int(value_parts[1])
                 except ValueError:
                     pass
             if len(value_parts) > 2:
                 if value_parts[2] == 'true':
                     block_dev_type.delete_on_termination = True
             block_device_map[device_name] = block_dev_type
     return block_device_map
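The arguments parsed here follow the euca2ools device=value[:size[:delete]] convention. Hypothetical inputs exercising each branch:

block_device_maps_args = [
    '/dev/sdb=snap-1234abcd:100:true',  # snapshot-backed, 100 GB, delete on termination
    '/dev/sdc=ephemeral0',              # instance-store volume
    '/dev/sdf=:50',                     # blank first field: fresh 50 GB EBS volume
]
# inside the command class: bdm = self.parse_block_device_args(block_device_maps_args)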
Code Example #28
File: services.py Project: gauravve/cloudbridge
    def _process_block_device_mappings(self, launch_config, zone=None):
        """
        Processes block device mapping information
        and returns a Boto BlockDeviceMapping object. If new volumes
        are requested (source is None and destination is VOLUME), they will be
        created and the relevant volume ids included in the mapping.
        """
        bdm = BlockDeviceMapping()
        # Assign letters from f onwards
        # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
        next_letter = iter(list(string.ascii_lowercase[6:]))
        # assign ephemeral devices from 0 onwards
        ephemeral_counter = 0
        for device in launch_config.block_devices:
            bd_type = BlockDeviceType()

            if device.is_volume:
                if device.is_root:
                    bdm['/dev/sda1'] = bd_type
                else:
                    bdm['sd' + next(next_letter)] = bd_type

                if isinstance(device.source, Snapshot):
                    bd_type.snapshot_id = device.source.id
                elif isinstance(device.source, Volume):
                    bd_type.volume_id = device.source.id
                elif isinstance(device.source, MachineImage):
                    # Not supported
                    pass
                else:
                    # source is None, but destination is volume, therefore
                    # create a blank volume. If the Zone is None, this
                    # could fail since the volume and instance may be created
                    # in two different zones.
                    if not zone:
                        raise InvalidConfigurationException(
                            "A zone must be specified when launching with a"
                            " new blank volume block device mapping.")
                    new_vol = self.provider.block_store.volumes.create(
                        '',
                        device.size,
                        zone)
                    bd_type.volume_id = new_vol.id
                bd_type.delete_on_termination = device.delete_on_terminate  # boto attribute is delete_on_termination
                if device.size:
                    bd_type.size = device.size
            else:  # device is ephemeral
                bd_type.ephemeral_name = 'ephemeral%s' % ephemeral_counter
                ephemeral_counter += 1
                bdm['sd' + next(next_letter)] = bd_type

        return bdm
Code Example #29
File: models.py Project: bobbyi/moto
 def _parse_block_device_mappings(self):
     block_device_map = BlockDeviceMapping()
     for mapping in self.block_device_mapping_dict:
         block_type = BlockDeviceType()
         mount_point = mapping.get('device_name')
         if 'ephemeral' in mapping.get('virtual_name', ''):
             block_type.ephemeral_name = mapping.get('virtual_name')
         else:
             block_type.volume_type = mapping.get('ebs._volume_type')
             block_type.snapshot_id = mapping.get('ebs._snapshot_id')
             block_type.delete_on_termination = mapping.get('ebs._delete_on_termination')
             block_type.size = mapping.get('ebs._volume_size')
             block_type.iops = mapping.get('ebs._iops')
         block_device_map[mount_point] = block_type
     return block_device_map
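self.block_device_mapping_dict holds the flattened request parameters moto received. A hypothetical value this parser would accept:

block_device_mapping_dict = [
    {'device_name': '/dev/xvdb', 'virtual_name': 'ephemeral0'},
    {'device_name': '/dev/sda1',
     'ebs._volume_type': 'io1',
     'ebs._snapshot_id': 'snap-1234abcd',
     'ebs._volume_size': 100,
     'ebs._iops': 1000,
     'ebs._delete_on_termination': True},
]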
Code Example #30
File: ec2.py Project: cutefish/PyUtils
 def startCluster(self, argv):
     if len(argv) != 0:
         print "ec2 startCluster"
         sys.exit(-1)
     regions = boto.ec2.regions()
     regionInfo = '\n'.join(str(region).split(':')[1] for region in regions)
     regionName = raw_input("select region:\n%s\n>>"%regionInfo)
     region = boto.ec2.get_region(regionName)
     conn = region.connect()
     print "region connected successfully"
     images = conn.get_all_images(owners='self')
     imageInfo = '\n'.join(
         str(image).split(':')[1] + ":" + image.name for image in images)
     imageId = raw_input("enter imageId:\nself-created images:\n%s\n>>"%imageInfo)
     instanceTypeInfo = ("m1.small, " "m1.large, " "m1.xlarge\n"
                         "c1.medium, " "c1.xlarge\n"
                         "m2.xlarge, " "m2.2xlarge, " "m2.4xlarge\n"
                         "cc1.4xlarge, " "t1.micro\n")
     instanceType = raw_input("enter instanceType:\n%s\n>>"%instanceTypeInfo)
     availZone = raw_input("enter placement[a,b,c]:\n>>")
     availZone = regionName + availZone
     diskSize = int(raw_input("enter disk size[G]:\n>>"))
     rootDev = BlockDeviceType()
     rootDev.name = 'root'
     rootDev.size = diskSize
     rootDev.delete_on_termination = True
     # raw_input() returns a string; bool() of any non-empty reply is True,
     # so check the answer explicitly
     instStorage = raw_input("mount inst storage? [y/N]\n>>").lower().startswith('y')
     mapping = BlockDeviceMapping()
     mapping['/dev/sda1'] = rootDev
     if instStorage:
         eph0 = BlockDeviceType()
         eph0.ephemeral_name = 'ephemeral0'
         mapping['/dev/sdb'] = eph0
     groups = conn.get_all_security_groups()
     groupInfo = '\n'.join(str(group).split(':')[1] for group in groups)
     group = raw_input("enter securityGroup:\n%s\n>>"%groupInfo)
     keys = conn.get_all_key_pairs()
     if len(keys) == 1:
         key = keys[0].name
         print 'using default key: ' + key
     else:
         keyInfo = '\n'.join(str(key).split(':')[1] for key in keys)
         key = raw_input("enter key name:\n%s\n>>"%keyInfo)
     numNodes = int(raw_input("number of nodes:\n>>"))
     conn.run_instances(
         imageId, min_count=numNodes, max_count=numNodes, placement=availZone,
         security_groups = [group], instance_type=instanceType,
         block_device_map=mapping, key_name=key)
Code Example #31
File: abbey.py Project: tpk543/edx-configuration
def create_instance_args():
    """
    Looks up security group, subnet
    and returns arguments to pass into
    ec2.run_instances() including
    user data
    """

    vpc = boto.vpc.connect_to_region(args.region)
    subnet = vpc.get_all_subnets(filters={
        'tag:aws:cloudformation:stack-name': stack_name,
        'tag:play': args.play
    })

    if len(subnet) < 1:
        #
        # try scheme for non-cloudformation builds
        #

        subnet = vpc.get_all_subnets(
            filters={
                'tag:play': args.play,
                'tag:environment': args.environment,
                'tag:deployment': args.deployment
            })

    if len(subnet) < 1:
        sys.stderr.write(
            "ERROR: Expected at least one subnet, got {} for {}-{}-{}\n".
            format(len(subnet), args.environment, args.deployment, args.play))
        sys.exit(1)
    subnet_id = subnet[0].id
    vpc_id = subnet[0].vpc_id

    security_group_id = get_instance_sec_group(vpc_id)

    if args.identity:
        config_secure = 'true'
        with open(args.identity) as f:
            identity_contents = f.read()
    else:
        config_secure = 'false'
        identity_contents = "dummy"

    user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
configuration_private_version="{configuration_private_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
cluster="{play}"
config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name=$(basename $git_repo_secure .git)
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
secure_vars_file={secure_vars_file}
environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml"
deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/{playbook_dir}"

if $config_secure; then
    git_cmd="env GIT_SSH=$git_ssh git"
else
    git_cmd="git"
fi

ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION={region}
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1
HIPCHAT_TOKEN={hipchat_token}
HIPCHAT_ROOM={hipchat_room}
HIPCHAT_MSG_PREFIX="$environment-$deployment-$play: "
HIPCHAT_FROM="ansible-$instance_id"
HIPCHAT_MSG_COLOR=$(echo -e "yellow\\ngreen\\npurple\\ngray" | shuf | head -1)
DATADOG_API_KEY={datadog_api_key}
# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM
export HIPCHAT_MSG_COLOR DATADOG_API_KEY

if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
    echo "Installing pkg dependencies"
    /usr/bin/apt-get update
    /usr/bin/apt-get install -y git python-pip python-apt \\
        git-core build-essential python-dev libxml2-dev \\
        libxslt-dev curl libmysqlclient-dev --force-yes
fi

# python3 is required for certain other things
# (currently xqwatcher so it can run python2 and 3 grader code,
# but potentially more in the future). It's not available on Ubuntu 12.04,
# but in those cases we don't need it anyways.
if [[ -n "$(apt-cache search --names-only '^python3-pip$')" ]]; then
    /usr/bin/apt-get update
    /usr/bin/apt-get install -y python3-pip python3-dev
fi

# this is missing on 14.04 (base package on 12.04)
# we need to do this on any build, since the above apt-get
# only runs on a build from scratch
/usr/bin/apt-get install -y python-httplib2 --force-yes

# Must upgrade to latest before pinning to work around bug
# https://github.com/pypa/pip/issues/3862
pip install --upgrade pip
hash -r   #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
pip install --upgrade pip==8.1.2

# upgrade setuptools early to avoid no distribution errors
pip install --upgrade setuptools==24.0.3

rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir

cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF

chmod 755 $git_ssh

if $config_secure; then
    cat << EOF > $secure_identity
{identity_contents}
EOF
fi

cat << EOF >> $extra_vars
---
# extra vars passed into
# abbey.py including versions
# of all the repositories
{extra_vars_yml}

# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true

disable_edx_services: true
COMMON_TAG_EC2_INSTANCE: true

# abbey should never take instances in
# and out of elbs
elb_pre_post: false
EOF

chmod 400 $secure_identity

$git_cmd clone $git_repo $git_repo_name
cd $git_repo_name
$git_cmd checkout $configuration_version
cd $base_dir

if $config_secure; then
    $git_cmd clone $git_repo_secure $git_repo_secure_name
    cd $git_repo_secure_name
    $git_cmd checkout $configuration_secure_version
    cd $base_dir
fi

if [[ ! -z $git_repo_private ]]; then
    $git_cmd clone $git_repo_private $git_repo_private_name
    cd $git_repo_private_name
    $git_cmd checkout $configuration_private_version
    cd $base_dir
fi


cd $base_dir/$git_repo_name
sudo pip install -r pre-requirements.txt
sudo pip install -r requirements.txt

cd $playbook_dir

if [[ -r "$deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$deployment_secure_vars"
fi

if [[ -r "$environment_deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$environment_deployment_secure_vars"
fi

if $secure_vars_file; then
    extra_args_opts+=" -e@$secure_vars_file"
fi

extra_args_opts+=" -e@$extra_vars"

ansible-playbook -vvvv -c local -i "localhost," $play.yml $extra_args_opts
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml $extra_args_opts

rm -rf $base_dir

    """.format(
        hipchat_token=args.hipchat_api_token,
        hipchat_room=args.ansible_hipchat_room_id,
        configuration_version=args.configuration_version,
        configuration_secure_version=args.configuration_secure_version,
        configuration_secure_repo=args.configuration_secure_repo,
        configuration_private_version=args.configuration_private_version,
        configuration_private_repo=args.configuration_private_repo,
        environment=args.environment,
        deployment=args.deployment,
        play=args.play,
        playbook_dir=args.playbook_dir,
        config_secure=config_secure,
        identity_contents=identity_contents,
        queue_name=run_id,
        extra_vars_yml=extra_vars_yml,
        secure_vars_file=secure_vars_file,
        cache_id=args.cache_id,
        datadog_api_key=args.datadog_api_key,
        region=args.region)

    mapping = BlockDeviceMapping()
    root_vol = BlockDeviceType(size=args.root_vol_size, volume_type='gp2')
    mapping['/dev/sda1'] = root_vol

    ec2_args = {
        'security_group_ids': [security_group_id],
        'subnet_id': subnet_id,
        'key_name': args.keypair,
        'image_id': base_ami,
        'instance_type': args.instance_type,
        'instance_profile_name': args.role_name,
        'user_data': user_data,
        'block_device_map': mapping,
    }

    return ec2_args
Code Example #32
def create_instance(name, config, region, key_name, ssh_key, instance_data,
                    deploypass, loaned_to, loan_bug, create_ami,
                    ignore_subnet_check, max_attempts):
    """Creates an AMI instance with the given name and config. The config must
    specify things like ami id."""
    conn = get_aws_connection(region)
    # Make sure we don't request the same things twice
    token = str(uuid.uuid4())[:16]

    instance_data = instance_data.copy()
    instance_data['name'] = name
    instance_data['domain'] = config['domain']
    instance_data['hostname'] = '{name}.{domain}'.format(
        name=name, domain=config['domain'])

    ami = conn.get_all_images(image_ids=[config["ami"]])[0]
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            bd = BlockDeviceType()
            if device_info.get('size'):
                bd.size = device_info['size']
            # Overwrite root device size for HVM instances, since they cannot
            # be resized online
            if ami.virtualization_type == "hvm" and \
                    ami.root_device_name == device:
                bd.size = ami.block_device_mapping[ami.root_device_name].size
            if device_info.get("delete_on_termination") is not False:
                bd.delete_on_termination = True
            if device_info.get("ephemeral_name"):
                bd.ephemeral_name = device_info["ephemeral_name"]
            if device_info.get("volume_type"):
                bd.volume_type = device_info["volume_type"]
                if device_info["volume_type"] == "io1" \
                        and device_info.get("iops"):
                    bd.iops = device_info["iops"]

            bdm[device] = bd

    security_group_ids = config.get('security_group_ids', [])
    if loaned_to:
        security_group_ids += config.get('loaner_security_group_ids', [])

    interfaces = make_instance_interfaces(region, instance_data['hostname'],
                                          ignore_subnet_check,
                                          config.get('subnet_ids'),
                                          security_group_ids,
                                          config.get("use_public_ip"))

    keep_going, attempt = True, 1
    while keep_going:
        try:
            puppet_master = pick_puppet_master(
                instance_data.get('puppet_masters'))
            user_data = user_data_from_template(
                config['type'], {
                    "puppet_server": puppet_master,
                    "fqdn": instance_data['hostname'],
                    "hostname": instance_data['name'],
                    "domain": instance_data['domain'],
                    "dns_search_domain": config.get('dns_search_domain'),
                    "password": deploypass,
                    "moz_instance_type": config['type'],
                    "region_dns_atom": get_region_dns_atom(region)
                })

            reservation = conn.run_instances(
                image_id=config['ami'],
                key_name=key_name,
                instance_type=config['instance_type'],
                block_device_map=bdm,
                client_token=token,
                disable_api_termination=config.get('disable_api_termination'),
                user_data=user_data,
                instance_profile_name=config.get('instance_profile_name'),
                network_interfaces=interfaces,
            )
            break
        except boto.exception.BotoServerError:
            log.exception("Cannot start an instance")
        time.sleep(10)
        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance = reservation.instances[0]
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    wait_for_status(instance, "state", "running", "update")
    instance.add_tag('Name', name)
    instance.add_tag('FQDN', instance_data['hostname'])
    instance.add_tag('created',
                     time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime()))
    instance.add_tag('moz-type', config['type'])
    if loaned_to:
        instance.add_tag("moz-loaned-to", loaned_to)
    if loan_bug:
        instance.add_tag("moz-bug", loan_bug)

    log.info("assimilating %s", instance)
    instance.add_tag('moz-state', 'pending')

    keep_going, attempt = True, 1
    while keep_going:
        try:
            # Don't reboot if need to create ami
            reboot = not create_ami
            assimilate_instance(instance=instance,
                                config=config,
                                ssh_key=ssh_key,
                                instance_data=instance_data,
                                deploypass=deploypass,
                                reboot=reboot)
            break
        except NetworkError as e:
            # it takes a while for the machine to start/reboot so the
            # NetworkError exception is quite common, just log the error,
            # without the full stack trace
            log.warn(
                "cannot connect; instance may still be starting  %s (%s, %s) - %s,"
                "retrying in %d sec ...", instance_data['hostname'],
                instance.id, instance.private_ip_address, e, FAILURE_TIMEOUT)
            time.sleep(FAILURE_TIMEOUT)

        except:  # noqa: E722
            # any other exception
            log.warn(
                "problem assimilating %s (%s, %s), retrying in "
                "%d sec ...",
                instance_data['hostname'],
                instance.id,
                instance.private_ip_address,
                FAILURE_TIMEOUT,
                exc_info=True)
            time.sleep(FAILURE_TIMEOUT)
        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance.add_tag('moz-state', 'ready')
    if create_ami:
        ami_name = "spot-%s-%s" % (
            config['type'], time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
        log.info("Generating AMI %s", ami_name)
        ami_cleanup(mount_point="/", distro=config["distro"])
        root_bd = instance.block_device_mapping[instance.root_device_name]
        volume = instance.connection.get_all_volumes(
            volume_ids=[root_bd.volume_id])[0]
        # The instance has to be stopped to flush EBS caches
        # The sleep is to prevent the occasional interference of the shutdown with the capture of Windows AMIs
        time.sleep(15)
        instance.stop()
        wait_for_status(instance, 'state', 'stopped', 'update')
        ami = volume_to_ami(volume=volume,
                            ami_name=ami_name,
                            arch=instance.architecture,
                            virtualization_type=instance.virtualization_type,
                            kernel_id=instance.kernel,
                            root_device_name=instance.root_device_name,
                            tags=config["tags"])
        log.info("AMI %s (%s) is ready", ami_name, ami.id)
        log.warn("Terminating %s", instance)
        instance.terminate()
Code Example #33
import os
import boto.ec2
from pprint import pprint


def cloudconfig():
    base_path = os.path.dirname(os.path.realpath(__file__))
    # use a context manager so the file is closed after reading
    with open(os.path.join(base_path, 'cloud-config')) as cloud_config:
        return cloud_config.read()


conn = boto.ec2.connect_to_region("eu-west-1")

from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

block_device_map = BlockDeviceMapping()
block_dev_type = BlockDeviceType()
block_dev_type.delete_on_termination = True
block_device_map['/dev/sda'] = block_dev_type

i = conn.run_instances('ami-0c10417b',
                       key_name='jpazdyga',
                       user_data=cloudconfig(),
                       instance_type='t1.micro',
                       security_groups=['automat'],
                       block_device_map=block_device_map)

print(i.instances)
Code Example #34
def create_instance(name, config, region, secrets, key_name, instance_data,
                    deploypass, loaned_to, loan_bug):
    """Creates an AMI instance with the given name and config. The config must
    specify things like ami id."""
    conn = get_connection(
        region,
        aws_access_key_id=secrets['aws_access_key_id'],
        aws_secret_access_key=secrets['aws_secret_access_key'])
    vpc = get_vpc(connection=conn,
                  aws_access_key_id=secrets['aws_access_key_id'],
                  aws_secret_access_key=secrets['aws_secret_access_key'])

    # Make sure we don't request the same things twice
    token = str(uuid.uuid4())[:16]

    instance_data = instance_data.copy()
    instance_data['name'] = name
    instance_data['hostname'] = '{name}.{domain}'.format(
        name=name, domain=config['domain'])

    ami = conn.get_all_images(image_ids=[config["ami"]])[0]
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            bd = BlockDeviceType()
            if device_info.get('size'):
                bd.size = device_info['size']
            # Overwrite root device size for HVM instances, since they cannot
            # be resized online
            if ami.virtualization_type == "hvm" and \
                    ami.root_device_name == device:
                bd.size = ami.block_device_mapping[ami.root_device_name].size
            if device_info.get("delete_on_termination") is not False:
                bd.delete_on_termination = True
            if device_info.get("ephemeral_name"):
                bd.ephemeral_name = device_info["ephemeral_name"]

            bdm[device] = bd

    ip_address = get_ip(instance_data['hostname'])
    subnet_id = None

    if ip_address:
        s_id = get_subnet_id(vpc, ip_address)
        if s_id in config['subnet_ids']:
            if ip_available(conn, ip_address):
                subnet_id = s_id
            else:
                log.warning("%s already assigned" % ip_address)

    if not ip_address or not subnet_id:
        ip_address = None
        subnet_id = choice(config.get('subnet_ids'))
    interface = NetworkInterfaceSpecification(
        subnet_id=subnet_id,
        private_ip_address=ip_address,
        delete_on_termination=True,
        groups=config.get('security_group_ids', []),
        associate_public_ip_address=config.get("use_public_ip"))
    interfaces = NetworkInterfaceCollection(interface)

    while True:
        try:
            reservation = conn.run_instances(
                image_id=config['ami'],
                key_name=key_name,
                instance_type=config['instance_type'],
                block_device_map=bdm,
                client_token=token,
                disable_api_termination=bool(
                    config.get('disable_api_termination')),
                network_interfaces=interfaces,
                instance_profile_name=config.get("instance_profile_name"),
            )
            break
        except boto.exception.BotoServerError:
            log.exception("Cannot start an instance")
        time.sleep(10)

    instance = reservation.instances[0]
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    while True:
        try:
            instance.update()
            if instance.state == 'running':
                break
        except Exception:
            log.warn("waiting for instance to come up, retrying in 10 sec...")
        time.sleep(10)

    instance.add_tag('Name', name)
    instance.add_tag('FQDN', instance_data['hostname'])
    instance.add_tag('created',
                     time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime()))
    instance.add_tag('moz-type', config['type'])
    if loaned_to:
        instance.add_tag("moz-loaned-to", loaned_to)
    if loan_bug:
        instance.add_tag("moz-bug", loan_bug)

    log.info("assimilating %s", instance)
    instance.add_tag('moz-state', 'pending')
    while True:
        try:
            assimilate(instance.private_ip_address, config, instance_data,
                       deploypass)
            break
        except:
            log.warn("problem assimilating %s (%s), retrying in 10 sec ...",
                     instance_data['hostname'], instance.id)
            time.sleep(10)
    instance.add_tag('moz-state', 'ready')
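
The loop above reads config['device_map'] keyed by device name. An illustrative, hypothetical fragment of such a config, and what it would produce under the rules above:

# Hypothetical device_map fragment; real configs may carry more keys.
device_map = {
    "/dev/sda1": {"size": 50, "delete_on_termination": True},
    "/dev/xvdb": {"ephemeral_name": "ephemeral0"},
}
# Fed through the loop above this yields a BlockDeviceMapping with a 50 GB
# root EBS volume (deleted on termination) and one instance-store disk.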
Code example #35
0
def create_instance_args():
    """
    Looks up security group, subnet
    and returns arguments to pass into
    ec2.run_instances() including
    user data
    """

    vpc = VPCConnection()
    subnet = vpc.get_all_subnets(filters={
        'tag:aws:cloudformation:stack-name': stack_name,
        'tag:play': args.play
    })

    if len(subnet) < 1:
        #
        # try scheme for non-cloudformation builds
        #

        subnet = vpc.get_all_subnets(
            filters={
                'tag:cluster': args.play,
                'tag:environment': args.environment,
                'tag:deployment': args.deployment
            })

    if len(subnet) < 1:
        sys.stderr.write(
            "ERROR: Expected at least one subnet, got {}\n".format(
                len(subnet)))
        sys.exit(1)
    subnet_id = subnet[0].id
    vpc_id = subnet[0].vpc_id

    security_group_id = get_instance_sec_group(vpc_id)

    if args.identity:
        config_secure = 'true'
        with open(args.identity) as f:
            identity_contents = f.read()
    else:
        config_secure = 'false'
        identity_contents = "dummy"

    user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
configuration_private_version="{configuration_private_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name=$(basename $git_repo_secure .git)
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
secure_vars_file={secure_vars_file}
environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml"
deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/{playbook_dir}"

if $config_secure; then
    git_cmd="env GIT_SSH=$git_ssh git"
else
    git_cmd="git"
fi

ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION=us-east-1
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1
HIPCHAT_TOKEN={hipchat_token}
HIPCHAT_ROOM={hipchat_room}
HIPCHAT_MSG_PREFIX="$environment-$deployment-$play: "
HIPCHAT_FROM="ansible-$instance_id"
HIPCHAT_MSG_COLOR=$(echo -e "yellow\\ngreen\\npurple\\ngray" | shuf | head -1)
# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM HIPCHAT_MSG_COLOR

if [[ ! -x /usr/bin/git || ! -x /usr/bin/pip ]]; then
    echo "Installing pkg dependencies"
    /usr/bin/apt-get update
    /usr/bin/apt-get install -y git python-pip python-apt \\
        git-core build-essential python-dev libxml2-dev \\
        libxslt-dev curl --force-yes
fi


rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir

cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF

chmod 755 $git_ssh

if $config_secure; then
    cat << EOF > $secure_identity
{identity_contents}
EOF
fi

cat << EOF >> $extra_vars
---
# extra vars passed into
# abbey.py including versions
# of all the repositories
{extra_vars_yml}

# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true

disable_edx_services: true
COMMON_TAG_EC2_INSTANCE: true

# abbey should never take instances in
# and out of elbs
elb_pre_post: false
EOF

chmod 400 $secure_identity

$git_cmd clone $git_repo $git_repo_name
cd $git_repo_name
$git_cmd checkout $configuration_version
cd $base_dir

if $config_secure; then
    $git_cmd clone $git_repo_secure $git_repo_secure_name
    cd $git_repo_secure_name
    $git_cmd checkout $configuration_secure_version
    cd $base_dir
fi

if [[ ! -z $git_repo_private ]]; then
    $git_cmd clone $git_repo_private $git_repo_private_name
    cd $git_repo_private_name
    $git_cmd checkout $configuration_private_version
    cd $base_dir
fi


cd $base_dir/$git_repo_name
sudo pip install -r requirements.txt

cd $playbook_dir

if [[ -r "$deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$deployment_secure_vars"
fi

if [[ -r "$environment_deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$environment_deployment_secure_vars"
fi

if $secure_vars_file; then
    extra_args_opts+=" -e@$secure_vars_file"
fi

extra_args_opts+=" -e@$extra_vars"

ansible-playbook -vvvv -c local -i "localhost," $play.yml $extra_args_opts
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml $extra_args_opts

rm -rf $base_dir

    """.format(
        hipchat_token=args.hipchat_api_token,
        hipchat_room=args.ansible_hipchat_room_id,
        configuration_version=args.configuration_version,
        configuration_secure_version=args.configuration_secure_version,
        configuration_secure_repo=args.configuration_secure_repo,
        configuration_private_version=args.configuration_private_version,
        configuration_private_repo=args.configuration_private_repo,
        environment=args.environment,
        deployment=args.deployment,
        play=args.play,
        playbook_dir=args.playbook_dir,
        config_secure=config_secure,
        identity_contents=identity_contents,
        queue_name=run_id,
        extra_vars_yml=extra_vars_yml,
        secure_vars_file=secure_vars_file,
        cache_id=args.cache_id)

    mapping = BlockDeviceMapping()
    root_vol = BlockDeviceType(size=args.root_vol_size, volume_type='gp2')
    mapping['/dev/sda1'] = root_vol

    ec2_args = {
        'security_group_ids': [security_group_id],
        'subnet_id': subnet_id,
        'key_name': args.keypair,
        'image_id': base_ami,
        'instance_type': args.instance_type,
        'instance_profile_name': args.role_name,
        'user_data': user_data,
        'block_device_map': mapping,
    }

    return ec2_args
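
The returned dict is intended to be splatted straight into run_instances. A minimal hedged sketch of the call site (ec2 is assumed to be a boto.ec2 connection created elsewhere in the script):

# Hypothetical caller.
ec2_args = create_instance_args()
reservation = ec2.run_instances(**ec2_args)
instance = reservation.instances[0]
print("Launched {} in subnet {}".format(instance.id, ec2_args['subnet_id']))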
Code example #36
0
def do_request_spot_instance(region, secrets, moz_instance_type, price, ami,
                             instance_config, cached_cert_dir, instance_type,
                             availability_zone, dryrun):
    conn = aws_connect_to_region(region, secrets)
    interface = get_available_interface(conn=conn,
                                        moz_instance_type=moz_instance_type,
                                        availability_zone=availability_zone)
    if not interface:
        raise RuntimeError("No free network interfaces left in %s" % region)

    # TODO: check DNS
    fqdn = interface.tags.get("FQDN")
    if not fqdn:
        raise RuntimeError("Skipping %s without FQDN" % interface)

    log.debug("Spot request for %s (%s)", fqdn, price)

    if dryrun:
        log.info("Dry run. skipping")
        return

    spec = NetworkInterfaceSpecification(network_interface_id=interface.id)
    nc = NetworkInterfaceCollection(spec)
    ip = interface.private_ip_address
    certs = get_puppet_certs(ip, secrets, cached_cert_dir)
    user_data = """
FQDN="%(fqdn)s"
cd /var/lib/puppet/ssl || exit 1
%(certs)s
cd -
""" % dict(fqdn=fqdn, certs=certs)
    if instance_config[region].get("lvm"):
        user_data += """
mkdir -p /etc/lvm-init/
cat <<EOF > /etc/lvm-init/lvm-init.json
%s
EOF
/sbin/lvm-init
""" % json.dumps(instance_config[region])

    bdm = BlockDeviceMapping()
    for device, device_info in instance_config[region]['device_map'].items():
        bd = BlockDeviceType()
        if device_info.get('size'):
            bd.size = device_info['size']
        if ami.root_device_name == device:
            ami_size = ami.block_device_mapping[device].size
            if ami.virtualization_type == "hvm":
                # Overwrite root device size for HVM instances, since they
                # cannot be resized online
                bd.size = ami_size
            elif device_info.get('size'):
                # make sure that size is enough for this AMI
                assert ami_size <= device_info['size'], \
                    "Instance root device size cannot be smaller than AMI " \
                    "root device"
        if device_info.get("delete_on_termination") is not False:
            bd.delete_on_termination = True
        if device_info.get("ephemeral_name"):
            bd.ephemeral_name = device_info["ephemeral_name"]

        bdm[device] = bd

    sir = conn.request_spot_instances(
        price=str(price),
        image_id=ami.id,
        count=1,
        instance_type=instance_type,
        key_name=instance_config[region]["ssh_key"],
        user_data=user_data,
        block_device_map=bdm,
        network_interfaces=nc,
        instance_profile_name=instance_config[region].get(
            "instance_profile_name"),
    )
    sir[0].add_tag("moz-type", moz_instance_type)
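
request_spot_instances only files the request; the instance shows up later. A hedged sketch of how a caller might poll the request until it is fulfilled (the helper name is illustrative):

import time

def wait_for_spot_instance(conn, request_id, poll=30):
    # Poll the spot request until EC2 attaches an instance id to it.
    while True:
        req = conn.get_all_spot_instance_requests(request_ids=[request_id])[0]
        if req.state == 'failed':
            raise RuntimeError("Spot request %s failed" % request_id)
        if req.instance_id:
            return req.instance_id
        time.sleep(poll)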
Code example #37
0
File: api.py Project: velankanisys/eucalyptus
 def handleImages(self, action, clc, callback=None):
     if action == 'DescribeImages':
         owner = self.get_argument('Owner', None)
         if not owner:
             owners = None
         else:
             owners = [owner]
         return clc.get_all_images(owners, callback)
     elif action == 'DescribeImageAttribute':
         imageid = self.get_argument('ImageId')
         attribute = self.get_argument('Attribute')
         return clc.get_image_attribute(imageid, attribute, callback)
     elif action == 'ModifyImageAttribute':
         imageid = self.get_argument('ImageId')
         attribute = self.get_argument('Attribute')
         operation = self.get_argument('OperationType')
         users = self.get_argument_list('UserId')
         groups = self.get_argument_list('UserGroup')
         return clc.modify_image_attribute(imageid, attribute, operation,
                                           users, groups, callback)
     elif action == 'ResetImageAttribute':
         imageid = self.get_argument('ImageId')
         attribute = self.get_argument('Attribute')
         return clc.reset_image_attribute(imageid, attribute, callback)
     elif action == 'DeregisterImage':
         image_id = self.get_argument('ImageId')
         return clc.deregister_image(image_id, callback)
     elif action == 'RegisterImage':
         image_location = self.get_argument('ImageLocation', None)
         name = self.get_argument('Name')
         description = self.get_argument('Description', None)
         if description != None:
             description = base64.b64decode(description)
         architecture = self.get_argument('Architecture', None)
         kernel_id = self.get_argument('KernelId', None)
         ramdisk_id = self.get_argument('RamdiskId', None)
         root_dev_name = self.get_argument('RootDeviceName', None)
         snapshot_id = self.get_argument('SnapshotId', None)
         # get block device mappings
         bdm = BlockDeviceMapping()
         mapping = self.get_argument('BlockDeviceMapping.1.DeviceName',
                                     None)
         idx = 1
         while mapping:
             pre = 'BlockDeviceMapping.%d' % idx
             dev_name = mapping
             block_dev_type = BlockDeviceType()
             block_dev_type.ephemeral_name = self.get_argument(
                 '%s.VirtualName' % pre, None)
             if not (block_dev_type.ephemeral_name):
                 block_dev_type.no_device = \
                     (self.get_argument('%s.NoDevice' % pre, '') == 'true')
                 block_dev_type.snapshot_id = \
                         self.get_argument('%s.Ebs.SnapshotId' % pre, None)
                 block_dev_type.size = \
                         self.get_argument('%s.Ebs.VolumeSize' % pre, None)
                 block_dev_type.delete_on_termination = \
                         (self.get_argument('%s.Ebs.DeleteOnTermination' % pre, '') == 'true')
             bdm[dev_name] = block_dev_type
             idx += 1
             mapping = self.get_argument(
                 'BlockDeviceMapping.%d.DeviceName' % idx, None)
         if snapshot_id:
             rootbdm = BlockDeviceType()
             rootbdm.snapshot_id = snapshot_id
             bdm['/dev/sda1'] = rootbdm
         if len(bdm) == 0:
             bdm = None
         return clc.register_image(name, image_location, description,
                                   architecture, kernel_id, ramdisk_id,
                                   root_dev_name, bdm, callback)
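
For reference, the flat BlockDeviceMapping.N.* query arguments parsed by the loop above are the same parameters boto builds when it serializes a mapping itself. A hedged sketch using boto's ec2_build_list_params helper:

from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

bdm = BlockDeviceMapping()
bdm['/dev/sda1'] = BlockDeviceType(snapshot_id='snap-12345678', size=20,
                                   delete_on_termination=True)

params = {}
bdm.ec2_build_list_params(params)
for key in sorted(params):
    print("%s = %s" % (key, params[key]))
# Prints keys such as BlockDeviceMapping.1.DeviceName and
# BlockDeviceMapping.1.Ebs.SnapshotId, mirroring the arguments parsed above.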
Code example #38
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-k",
                        "--secrets",
                        type=argparse.FileType('r'),
                        help="optional file where secrets can be found")
    parser.add_argument("-r",
                        "--region",
                        dest="region",
                        required=True,
                        help="optional list of regions")
    parser.add_argument("-q",
                        "--quiet",
                        action="store_true",
                        help="Supress logging messages")
    parser.add_argument("-c", "--ami-config", required=True, help="AMI config")
    parser.add_argument("-i",
                        "--instance-config",
                        required=True,
                        help="Instance config")
    parser.add_argument("--ssh-key", required=True, help="SSH key name")
    parser.add_argument("--user", help="Login name")
    parser.add_argument("--public",
                        action="store_true",
                        default=False,
                        help="Generate a public AMI (no secrets)")

    args = parser.parse_args()
    if args.secrets:
        secrets = json.load(args.secrets)
    else:
        secrets = None
    try:
        ami_config = json.load(
            open("%s/%s.json" %
                 (AMI_CONFIGS_DIR, args.ami_config)))[args.region]
        moz_type_config = json.load(
            open("%s/%s" %
                 (INSTANCE_CONFIGS_DIR, args.instance_config)))[args.region]
    except KeyError:
        parser.error("unknown configuration")

    logging.basicConfig(format="%(asctime)s - %(levelname)s - %(message)s")
    if not args.quiet:
        log.setLevel(logging.DEBUG)
    else:
        log.setLevel(logging.ERROR)

    if secrets:
        conn = connect_to_region(
            args.region,
            aws_access_key_id=secrets['aws_access_key_id'],
            aws_secret_access_key=secrets['aws_secret_access_key'])
    else:
        conn = connect_to_region(args.region)

    dated_target_name = "spot-%s-%s" % (
        args.ami_config, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
    filters = {"tag:moz-state": "ready", "instance-state-name": "stopped"}
    for tag, value in moz_type_config["tags"].iteritems():
        filters["tag:%s" % tag] = value
    using_stopped_instance = True
    res = conn.get_all_instances(filters=filters)
    if not res:
        filters["instance-state-name"] = "running"
        res = conn.get_all_instances(filters=filters)
        using_stopped_instance = False
    instances = reduce(lambda a, b: a + b, [r.instances for r in res])
    # skip loaned instances
    instances = [i for i in instances if not i.tags.get("moz-loaned-to")]
    i = sorted(instances, key=lambda i: i.launch_time)[-1]
    log.debug("Selected instance to clone: %s", i)
    v_id = i.block_device_mapping[i.root_device_name].volume_id
    v = conn.get_all_volumes(volume_ids=[v_id])[0]
    snap1 = v.create_snapshot("temporary snapshot of %s" % v_id)

    wait_for_status(snap1, "status", "completed", "update")
    host_instance = create_instance(connection=conn,
                                    instance_name="tmp",
                                    config=ami_config,
                                    key_name=args.ssh_key,
                                    user=args.user,
                                    subnet_id=random.choice(
                                        moz_type_config["subnet_ids"]))

    env.host_string = host_instance.private_ip_address
    env.user = '******'
    env.abort_on_prompts = True
    env.disable_known_hosts = True
    int_dev_name = ami_config['target']['int_dev_name']
    mount_dev = int_dev_name
    mount_point = ami_config['target']['mount_point']
    virtualization_type = ami_config.get("virtualization_type")
    if virtualization_type == "hvm":
        mount_dev = "%s1" % mount_dev
    tmp_v = conn.create_volume(size=snap1.volume_size,
                               zone=host_instance.placement,
                               snapshot=snap1)
    wait_for_status(tmp_v, "status", "available", "update")
    while True:
        try:
            tmp_v.attach(host_instance.id,
                         ami_config['target']['aws_dev_name'])
            break
        except:
            log.debug('hit error waiting for volume to be attached')
            time.sleep(10)
    while True:
        try:
            tmp_v.update()
            if tmp_v.status == 'in-use':
                if run('ls %s' % int_dev_name).succeeded:
                    break
        except:
            log.debug('hit error waiting for volume to be attached')
            time.sleep(10)
    run('mkdir -p %s' % mount_point)
    run('mount {dev} {mount_point}'.format(dev=mount_dev,
                                           mount_point=mount_point))
    with cd(mount_point):
        run("rm -f root/*.sh")
        run("rm -f root/*.log")
        run("rm -f root/userdata")
        run("rm -f root/*.done")
        run("rm -f etc/spot_setup.done")
        run("rm -f var/lib/puppet/ssl/private_keys/*")
        run("rm -f var/lib/puppet/ssl/certs/*")
        if not using_stopped_instance or args.public:
            run("rm -rf builds/slave")
        else:
            run("rm -f builds/slave/buildbot.tac")
        run("echo localhost > etc/hostname")
        run("sed -i -e 's/127.0.0.1.*/127.0.0.1 localhost/g' etc/hosts")
        if args.public:
            # put rc.local
            put("%s/%s/etc/rc.local" % (AMI_CONFIGS_DIR, args.ami_config),
                "etc/rc.local",
                mirror_local_mode=True)
            run("rm -rf home/cltbld/.ssh")
            run("rm -rf root/.ssh/*")
            run("rm -rf builds/gapi.data")
            run("rm -rf builds/mock_mozilla/*/root/home/mock_mozilla")
        else:
            put("%s/spot_setup.sh" % AMI_CONFIGS_DIR,
                "etc/spot_setup.sh",
                mirror_local_mode=True)
            # replace puppet init with our script
            if ami_config["distro"] == "ubuntu":
                put("%s/spot_setup.conf" % AMI_CONFIGS_DIR,
                    "etc/init/puppet.conf",
                    mirror_local_mode=True)
            else:
                run("echo '/etc/spot_setup.sh' > etc/init.d/puppet")
    # create snapshot2
    log.info('Terminating %s', host_instance)
    host_instance.terminate()
    wait_for_status(tmp_v, "status", "available", "update")
    log.info('Creating a snapshot')
    snap2 = tmp_v.create_snapshot(dated_target_name)
    wait_for_status(snap2, "status", "completed", "update")
    snap2.add_tag("Name", dated_target_name)

    bdm = BlockDeviceMapping()
    bdm[i.root_device_name] = BlockDeviceType(snapshot_id=snap2.id)

    log.info('Creating AMI')

    if virtualization_type == "hvm":
        kernel_id = None
    else:
        kernel_id = i.kernel

    ami_id = conn.register_image(
        dated_target_name,
        dated_target_name,
        architecture=ami_config["arch"],
        kernel_id=kernel_id,
        root_device_name=i.root_device_name,
        block_device_map=bdm,
        virtualization_type=virtualization_type,
    )
    log.info('Waiting...')
    while True:
        try:
            ami = conn.get_image(ami_id)
            ami.add_tag('Name', dated_target_name)
            ami.add_tag('moz-created', int(time.mktime(time.gmtime())))
            for tag, value in moz_type_config["tags"].iteritems():
                ami.add_tag(tag, value)
            log.info('AMI created')
            log.info('ID: {id}, name: {name}'.format(id=ami.id, name=ami.name))
            break
        except:
            log.info('Waiting for AMI')
            time.sleep(10)
    # Step 7: Cleanup
    log.info('Cleanup...')
    tmp_v.delete()
    snap1.delete()
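
wait_for_status is a helper defined elsewhere in these scripts; based on how it is called here (object, attribute name, expected value, update method name), a hedged sketch of what it presumably looks like:

import time

def wait_for_status(obj, attr, expected, update_method, poll=10):
    # Generic poller: invoke the named refresh method until the watched
    # attribute reaches the expected value.
    while getattr(obj, attr) != expected:
        time.sleep(poll)
        getattr(obj, update_method)()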
Code example #39
0
 def setUp(self):
     self.block_device_type = BlockDeviceType()
Code example #40
0
def test_create_launch_configuration_with_block_device_mappings():
    block_device_mapping = BlockDeviceMapping()

    ephemeral_drive = BlockDeviceType()
    ephemeral_drive.ephemeral_name = "ephemeral0"
    block_device_mapping["/dev/xvdb"] = ephemeral_drive

    snapshot_drive = BlockDeviceType()
    snapshot_drive.snapshot_id = "snap-1234abcd"
    snapshot_drive.volume_type = "standard"
    block_device_mapping["/dev/xvdp"] = snapshot_drive

    ebs_drive = BlockDeviceType()
    ebs_drive.volume_type = "io1"
    ebs_drive.size = 100
    ebs_drive.iops = 1000
    ebs_drive.delete_on_termination = False
    block_device_mapping["/dev/xvdh"] = ebs_drive

    conn = boto.connect_autoscale(use_block_device_types=True)
    config = LaunchConfiguration(
        name="tester",
        image_id="ami-abcd1234",
        instance_type="m1.small",
        key_name="the_keys",
        security_groups=["default", "default2"],
        user_data=b"This is some user_data",
        instance_monitoring=True,
        instance_profile_name="arn:aws:iam::{}:instance-profile/testing".
        format(ACCOUNT_ID),
        spot_price=0.1,
        block_device_mappings=[block_device_mapping],
    )
    conn.create_launch_configuration(config)

    launch_config = conn.get_all_launch_configurations()[0]
    launch_config.name.should.equal("tester")
    launch_config.image_id.should.equal("ami-abcd1234")
    launch_config.instance_type.should.equal("m1.small")
    launch_config.key_name.should.equal("the_keys")
    set(launch_config.security_groups).should.equal(
        set(["default", "default2"]))
    launch_config.user_data.should.equal(b"This is some user_data")
    launch_config.instance_monitoring.enabled.should.equal("true")
    launch_config.instance_profile_name.should.equal(
        "arn:aws:iam::{}:instance-profile/testing".format(ACCOUNT_ID))
    launch_config.spot_price.should.equal(0.1)
    len(launch_config.block_device_mappings).should.equal(3)

    returned_mapping = launch_config.block_device_mappings

    set(returned_mapping.keys()).should.equal(
        set(["/dev/xvdb", "/dev/xvdp", "/dev/xvdh"]))

    returned_mapping["/dev/xvdh"].iops.should.equal(1000)
    returned_mapping["/dev/xvdh"].size.should.equal(100)
    returned_mapping["/dev/xvdh"].volume_type.should.equal("io1")
    returned_mapping["/dev/xvdh"].delete_on_termination.should.be.false

    returned_mapping["/dev/xvdp"].snapshot_id.should.equal("snap-1234abcd")
    returned_mapping["/dev/xvdp"].volume_type.should.equal("standard")

    returned_mapping["/dev/xvdb"].ephemeral_name.should.equal("ephemeral0")
Code example #41
0
def create_cluster(args):
    args = load_config(args)

    conn = boto.ec2.connect_to_region(
        args["region"],
        aws_access_key_id=args["aws_access_key_id"],
        aws_secret_access_key=args["aws_secret_access_key"])
    create_default_security_group(conn)
    if len(query_status(args)):
        print("Error: A cluster with the name '{}' exists. ".format(args["name"]) +
              "Please choose a different cluster name.\n" +
              "Note: If you want to check the status of the cluster '{}', ".format(args["name"]) +
              "please use `aws-jupyter check`.")
        return False

    # TODO: removed "--associate-public-ip-address" from the options, check if things still work
    print("Creating the cluster...")
    # Declare the block device mapping for ephemeral disks
    # TODO: adjust mount points, read this: https://cloudinit.readthedocs.io/en/latest/topics/examples.html#adjust-mount-points-mounted
    device_mapping = BlockDeviceMapping()
    for i in range(24):
        eph = BlockDeviceType()
        eph.ephemeral_name = 'ephemeral%d' % i
        device_mapping['/dev/sd{}'.format(chr(ord('b') + i))] = eph
    if args["spot"] > 0.0:
        print("We will use spot instances.")
        try:
            reservation = conn.request_spot_instances(
                price=float(args["spot"]),
                placement=f"{args['region']}a",
                image_id=args["ami"],
                count=args["count"],
                type='one-time',
                key_name=args["key"],
                security_groups=[SECURITY_GROUP_NAME],
                instance_type=args["type"],
                block_device_map=device_mapping,
                dry_run=False)
        except EC2ResponseError as e:
            err_msg = e.message
            print("Error: " + err_msg)
            if "image id" in err_msg.lower():
                print("If you are using the default AMI, try upgrade aws-jupyter " +
                "by `pip install --upgrade aws-jupyter`.")
            return False
        request_ids = [r.id for r in reservation]
        print("Please wait until the spot instances are fulfilled", end='')
        i = 0
        instance_ids = []
        while i < len(request_ids):
            request_id = request_ids[i]
            try:
                spot_req = conn.get_all_spot_instance_requests(request_ids=[request_id])[0]
            except EC2ResponseError:
                print(";", end='')
                sleep(2)
                continue
            if spot_req.state == 'failed':
                print("\nError: Spot request failed")
                # TODO: cancel the spot request
                return False
            if not spot_req.instance_id:
                print(".", end='')
                sleep(2)
                continue
            instance_ids.append(spot_req.instance_id)
            i += 1
        print()
    else:
        print("We will use on-demand instances.")
        try:
            reservation = conn.run_instances(
                args["ami"],
                min_count=args["count"],
                max_count=args["count"],
                key_name=args["key"],
                security_groups=[SECURITY_GROUP_NAME],
                instance_type=args["type"],
                block_device_map=device_mapping,
                dry_run=False)
        except EC2ResponseError as e:
            err_msg = e.message
            print("Error: " + err_msg)
            if "image id" in err_msg.lower():
                print("If you are using the default AMI, try upgrade aws-jupyter " +
                "by `pip install --upgrade aws-jupyter`.")
            return False
        instance_ids = [instance.id for instance in reservation.instances]
    print("Setting tags.")
    conn.create_tags(instance_ids, {"cluster-name": args["name"]})
    print("Launched instances:")
    for instance in instance_ids:
        if args["spot"] > 0.0:
            print("{} (spot)".format(instance))
        else:
            print("{} (on demand)".format(instance))
    print("Done.")
    return True
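
Once the cluster-name tag is set, the instances can be found again by the same tag; a minimal hedged sketch of such a lookup (query_status above presumably does something similar):

def find_cluster_instances(conn, cluster_name):
    # Collect all instances carrying the cluster-name tag, skipping ones
    # that are already going away.
    reservations = conn.get_all_instances(
        filters={"tag:cluster-name": cluster_name})
    instances = [i for r in reservations for i in r.instances]
    return [i for i in instances
            if i.state not in ("terminated", "shutting-down")]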
Code example #42
0
# http://www.onepwr.org/2012/04/26/chef-recipe-to-setup-up-a-new-nodes-fqdn-hostname-etc-properly/
#
if check_repo_state(reponame, github_temporary_token):
    raise RuntimeError(
        'Repository {} already exists, not going to run EC2'.format(reponame))

with open(join(dirname(__file__), 'bizarro', 'setup', 'user-data.sh')) as file:
    user_data = file.read().format(
        branch_name='deployed-instance-details-#126',
        ga_client_id=gdocs_client_id,
        ga_client_secret=gdocs_client_secret,
        github_temporary_token=github_temporary_token,
        github_repo=reponame,
        auth_data_href=sheet_url)

device_sda1 = BlockDeviceType(size=16, delete_on_termination=True)
device_map = BlockDeviceMapping()
device_map['/dev/sda1'] = device_sda1

ec2_args = dict(instance_type='c3.large',
                user_data=user_data,
                key_name='cfa-keypair-2015',
                block_device_map=device_map,
                security_groups=['default'])

instance = ec2.run_instances('ami-f8763a90', **ec2_args).instances[0]
instance.add_tag('Name', 'Ceviche Test {}'.format(reponame))

print '    Prepared EC2 instance', instance.id

while not instance.dns_name:
Code example #43
0
File: ec2_ami.py Project: zhostasa/ansible
def create_image(module, ec2):
    """
    Creates new AMI

    module : AnsibleModule object
    ec2: authenticated ec2 connection object
    """

    instance_id = module.params.get('instance_id')
    name = module.params.get('name')
    wait = module.params.get('wait')
    wait_timeout = int(module.params.get('wait_timeout'))
    description = module.params.get('description')
    architecture = module.params.get('architecture')
    kernel_id = module.params.get('kernel_id')
    root_device_name = module.params.get('root_device_name')
    virtualization_type = module.params.get('virtualization_type')
    no_reboot = module.params.get('no_reboot')
    device_mapping = module.params.get('device_mapping')
    tags = module.params.get('tags')
    launch_permissions = module.params.get('launch_permissions')

    try:
        params = {'name': name, 'description': description}

        images = ec2.get_all_images(filters={'name': name})

        if images and images[0]:
            # ensure that launch_permissions are up to date
            update_image(module, ec2, images[0].id)

        bdm = None
        if device_mapping:
            bdm = BlockDeviceMapping()
            for device in device_mapping:
                if 'device_name' not in device:
                    module.fail_json(msg='Device name must be set for volume')
                device_name = device['device_name']
                del device['device_name']
                bd = BlockDeviceType(**device)
                bdm[device_name] = bd

        if instance_id:
            params['instance_id'] = instance_id
            params['no_reboot'] = no_reboot
            if bdm:
                params['block_device_mapping'] = bdm
            image_id = ec2.create_image(**params)
        else:
            params['architecture'] = architecture
            params['virtualization_type'] = virtualization_type
            if kernel_id:
                params['kernel_id'] = kernel_id
            if root_device_name:
                params['root_device_name'] = root_device_name
            if bdm:
                params['block_device_map'] = bdm
            image_id = ec2.register_image(**params)
    except boto.exception.BotoServerError as e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    # Wait until the image is recognized. EC2 API has eventual consistency,
    # such that a successful CreateImage API call doesn't guarantee the success
    # of subsequent DescribeImages API call using the new image id returned.
    for i in range(wait_timeout):
        try:
            img = ec2.get_image(image_id)

            if img.state == 'available':
                break
            elif img.state == 'failed':
                module.fail_json(
                    msg=
                    "AMI creation failed, please see the AWS console for more details"
                )
        except boto.exception.EC2ResponseError as e:
            if ('InvalidAMIID.NotFound' not in e.error_code
                    and 'InvalidAMIID.Unavailable'
                    not in e.error_code) and wait and i == wait_timeout - 1:
                module.fail_json(
                    msg=
                    "Error while trying to find the new image. Using wait=yes and/or a longer "
                    "wait_timeout may help. %s: %s" %
                    (e.error_code, e.error_message))
        finally:
            time.sleep(1)

    if img.state != 'available':
        module.fail_json(
            msg=
            "Error while trying to find the new image. Using wait=yes and/or a longer wait_timeout may help."
        )

    if tags:
        try:
            ec2.create_tags(image_id, tags)
        except boto.exception.EC2ResponseError as e:
            module.fail_json(msg="Image tagging failed => %s: %s" %
                             (e.error_code, e.error_message))
    if launch_permissions:
        try:
            img = ec2.get_image(image_id)
            img.set_launch_permissions(**launch_permissions)
        except boto.exception.BotoServerError as e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message),
                             image_id=image_id)

    module.exit_json(msg="AMI creation operation complete",
                     changed=True,
                     **get_ami_info(img))
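
The device_mapping parameter above is a list of dicts whose remaining keys (after device_name is popped) are passed straight to BlockDeviceType. An illustrative, hypothetical value and the mapping it yields:

from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

# Illustrative module input, not taken from a real playbook.
device_mapping = [
    {'device_name': '/dev/sda1', 'size': 30, 'delete_on_termination': True},
    {'device_name': '/dev/sdb', 'ephemeral_name': 'ephemeral0'},
]

bdm = BlockDeviceMapping()
for device in device_mapping:
    device = dict(device)              # keep the caller's dict intact
    name = device.pop('device_name')
    bdm[name] = BlockDeviceType(**device)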
Code example #44
0
def launch_cluster(conn, opts, cluster_name):
    if opts.identity_file is None:
        print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
        sys.exit(1)
    if opts.key_pair is None:
        print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
        sys.exit(1)

    user_data_content = None
    if opts.user_data:
        with open(opts.user_data) as user_data_file:
            user_data_content = user_data_file.read()

    print "Setting up security groups..."
    if opts.security_group_prefix is None:
        master_group = get_or_make_group(conn, cluster_name + "-master")
        slave_group = get_or_make_group(conn, cluster_name + "-slaves")
    else:
        master_group = get_or_make_group(conn, opts.security_group_prefix + "-master")
        slave_group = get_or_make_group(conn, opts.security_group_prefix + "-slaves")
    authorized_address = opts.authorized_address
    if master_group.rules == []:  # Group was just now created
        master_group.authorize(src_group=master_group)
        master_group.authorize(src_group=slave_group)
        master_group.authorize('tcp', 22, 22, authorized_address)
        master_group.authorize('tcp', 8080, 8081, authorized_address)
        master_group.authorize('tcp', 18080, 18080, authorized_address)
        master_group.authorize('tcp', 19999, 19999, authorized_address)
        master_group.authorize('tcp', 50030, 50030, authorized_address)
        master_group.authorize('tcp', 50070, 50070, authorized_address)
        master_group.authorize('tcp', 60070, 60070, authorized_address)
        master_group.authorize('tcp', 4040, 4045, authorized_address)
        if opts.ganglia:
            master_group.authorize('tcp', 5080, 5080, authorized_address)
    if slave_group.rules == []:  # Group was just now created
        slave_group.authorize(src_group=master_group)
        slave_group.authorize(src_group=slave_group)
        slave_group.authorize('tcp', 22, 22, authorized_address)
        slave_group.authorize('tcp', 8080, 8081, authorized_address)
        slave_group.authorize('tcp', 50060, 50060, authorized_address)
        slave_group.authorize('tcp', 50075, 50075, authorized_address)
        slave_group.authorize('tcp', 60060, 60060, authorized_address)
        slave_group.authorize('tcp', 60075, 60075, authorized_address)

    # Check if instances are already running with the cluster name
    existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
                                                             die_on_error=False)
    if existing_slaves or (existing_masters and not opts.use_existing_master):
        print >> stderr, ("ERROR: There are already instances for name: %s " % cluster_name)
        sys.exit(1)

    # Figure out Spark AMI
    if opts.ami is None:
        opts.ami = get_spark_ami(opts)

    additional_groups = []
    if opts.additional_security_group:
        additional_groups = [sg
                             for sg in conn.get_all_security_groups()
                             if opts.additional_security_group in (sg.name, sg.id)]
    print "Launching instances..."

    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        print >> stderr, "Could not find AMI " + opts.ami
        sys.exit(1)

    # Create block device mapping so that we can add EBS volumes if asked to.
    # The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        for i in range(opts.ebs_vol_num):
            device = EBSBlockDeviceType()
            device.size = opts.ebs_vol_size
            device.volume_type = opts.ebs_vol_type
            device.delete_on_termination = True
            block_map["/dev/sd" + chr(ord('s') + i)] = device

    # AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
    if opts.instance_type.startswith('m3.'):
        for i in range(get_num_disks(opts.instance_type)):
            dev = BlockDeviceType()
            dev.ephemeral_name = 'ephemeral%d' % i
            # The first ephemeral drive is /dev/sdb.
            name = '/dev/sd' + string.letters[i + 1]
            block_map[name] = dev

    # Launch slaves
    if opts.spot_price is not None:
        # Launch spot instances with the requested price
        print ("Requesting %d slaves as spot instances with price $%.3f" %
               (opts.slaves, opts.spot_price))
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        my_req_ids = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            slave_reqs = conn.request_spot_instances(
                price=opts.spot_price,
                image_id=opts.ami,
                launch_group="launch-group-%s" % cluster_name,
                placement=zone,
                count=num_slaves_this_zone,
                key_name=opts.key_pair,
                security_groups=[slave_group] + additional_groups,
                instance_type=opts.instance_type,
                block_device_map=block_map,
                user_data=user_data_content)
            my_req_ids += [req.id for req in slave_reqs]
            i += 1

        print "Waiting for spot instances to be granted..."
        try:
            while True:
                time.sleep(10)
                reqs = conn.get_all_spot_instance_requests()
                id_to_req = {}
                for r in reqs:
                    id_to_req[r.id] = r
                active_instance_ids = []
                outstanding_request_ids = []
                for i in my_req_ids:
                    if i in id_to_req:
                        if id_to_req[i].state == "active":
                            active_instance_ids.append(id_to_req[i].instance_id)
                        else:
                            outstanding_request_ids.append(i)
                if len(active_instance_ids) == opts.slaves:
                    print "All %d slaves granted" % opts.slaves
                    reservations = conn.get_all_instances(active_instance_ids)
                    slave_nodes = []
                    for r in reservations:
                        slave_nodes += r.instances
                    break
                else:
                    print "%d of %d slaves granted, waiting longer for request ids including %s" % (
                        len(active_instance_ids), opts.slaves, outstanding_request_ids[0:10])
        except:
            print "Canceling spot instance requests"
            conn.cancel_spot_instance_requests(my_req_ids)
            # Log a warning if any of these requests actually launched instances:
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            running = len(master_nodes) + len(slave_nodes)
            if running:
                print >> stderr, ("WARNING: %d instances are still running" % running)
            sys.exit(0)
    else:
        # Launch non-spot instances
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        slave_nodes = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            if num_slaves_this_zone > 0:
                slave_res = image.run(key_name=opts.key_pair,
                                      security_groups=[slave_group] + additional_groups,
                                      instance_type=opts.instance_type,
                                      placement=zone,
                                      min_count=num_slaves_this_zone,
                                      max_count=num_slaves_this_zone,
                                      block_device_map=block_map,
                                      user_data=user_data_content)
                slave_nodes += slave_res.instances
                print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
                                                                zone, slave_res.id)
            i += 1

    # Launch or resume masters
    if existing_masters:
        print "Starting master..."
        for inst in existing_masters:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        master_nodes = existing_masters
    else:
        master_type = opts.master_instance_type
        if master_type == "":
            master_type = opts.instance_type
        if opts.zone == 'all':
            opts.zone = random.choice(conn.get_all_zones()).name
        master_res = image.run(key_name=opts.key_pair,
                               security_groups=[master_group] + additional_groups,
                               instance_type=master_type,
                               placement=opts.zone,
                               min_count=1,
                               max_count=1,
                               block_device_map=block_map,
                               user_data=user_data_content)
        master_nodes = master_res.instances
        print "Launched master in %s, regid = %s" % (zone, master_res.id)

    # Give the instances descriptive names
    for master in master_nodes:
        name = '{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id)
        tag_instance(master, name)

    for slave in slave_nodes:
        name = '{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id)
        tag_instance(slave, name)

    # Return all the instances
    return (master_nodes, slave_nodes)
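
get_partition above splits opts.slaves across the availability zones; a hedged sketch of the arithmetic it presumably performs:

def get_partition(total, num_partitions, current_partition):
    # Spread `total` items over the partitions, handing the remainder to the
    # earliest ones (an assumption about the helper, not its actual source).
    num_this_partition = total // num_partitions
    if (total % num_partitions) - current_partition > 0:
        num_this_partition += 1
    return num_this_partition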
Code example #45
0
File: ec2_vol.py Project: shreddy0362/vagrant-
def modify_dot_attribute(module, ec2, instance, device_name):
    """ Modify delete_on_termination attribute """

    delete_on_termination = module.params.get('delete_on_termination')
    changed = False

    try:
        instance.update()
        dot = instance.block_device_mapping[device_name].delete_on_termination
    except boto.exception.BotoServerError, e:
        module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))

    if delete_on_termination != dot:
        try:
            bdt = BlockDeviceType(delete_on_termination=delete_on_termination)
            bdm = BlockDeviceMapping()
            bdm[device_name] = bdt

            ec2.modify_instance_attribute(instance_id=instance.id,
                                          attribute='blockDeviceMapping',
                                          value=bdm)

            while instance.block_device_mapping[
                    device_name].delete_on_termination != delete_on_termination:
                time.sleep(3)
                instance.update()
            changed = True
        except boto.exception.BotoServerError, e:
            module.fail_json(msg="%s: %s" % (e.error_code, e.error_message))
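
Outside of Ansible, the same delete_on_termination flip boils down to one modify_instance_attribute call; a minimal hedged sketch:

from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

def set_delete_on_termination(ec2, instance_id, device_name, value):
    # Build a one-entry mapping and push it as the blockDeviceMapping
    # attribute, exactly as the module above does.
    bdm = BlockDeviceMapping()
    bdm[device_name] = BlockDeviceType(delete_on_termination=value)
    ec2.modify_instance_attribute(instance_id=instance_id,
                                  attribute='blockDeviceMapping',
                                  value=bdm)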
Code example #46
0
            #print i.instance_type
            #print i.instance_profile
            print '-----'


region_name = 'us-west-1'
for r in boto.ec2.regions():
    if r.name == region_name:
        break
conn = boto.connect_ec2(region=r)

#print conn.run_instances(image_id='ami-75287b30')
#print conn.run_instances(image_id='ami-71287b34')

mapping = BlockDeviceMapping()
eph0 = BlockDeviceType()
eph1 = BlockDeviceType()
eph0.ephemeral_name = 'ephemeral0'
eph1.ephemeral_name = 'ephemeral1'
mapping['/dev/xvdc'] = eph0
mapping['/dev/xvdd'] = eph1

print conn.run_instances(image_id='ami-75287b30',
                         instance_type='m1.medium',
                         key_name='debian6',
                         block_device_map=mapping)

#print conn.terminate_instances(instance_ids=['i-8bd812d3'])
#print sys.argv[1:]

#for i in conn.get_all_images(filters={ "architecture":"x86_64", "state":"available", 'image-id':'ami-75287b30' }):
Code example #47
0
File: run.py Project: tkardi/machine
def run_ec2(args):
    tempdir = mkdtemp(prefix='oa-')
    tarball = prepare_tarball(tempdir, args.repository, args.branch)

    _L.info('Created repository archive at {}'.format(tarball))

    #
    # Prepare init script for new EC2 instance to run.
    #
    with open(join(dirname(__file__), 'templates', 'user-data.sh')) as file:
        user_data = file.read().format(**args.__dict__)

    _L.info('Prepared {} bytes of instance user data for tag {}'.format(
        len(user_data), args.branch))

    #
    # Figure out how much we're willing to bid on a spot instance.
    #
    ec2_access_key = args.ec2_access_key or environ.get(
        'EC2_ACCESS_KEY_ID', args.access_key)
    ec2_secret_key = args.ec2_secret_key or environ.get(
        'EC2_SECRET_ACCESS_KEY', args.secret_key)
    ec2 = EC2Connection(ec2_access_key, ec2_secret_key)

    bid = get_bid_amount(ec2, args.ec2_instance_type, args.bid_strategy)
    _L.info('Bidding ${:.4f}/hour for {} instance'.format(
        bid, args.ec2_instance_type))

    #
    # Request a spot instance with 200GB storage.
    #
    device_sda1 = BlockDeviceType(size=200, delete_on_termination=True)
    device_map = BlockDeviceMapping()
    device_map['/dev/sda1'] = device_sda1

    spot_args = dict(instance_type=args.ec2_instance_type,
                     user_data=user_data,
                     key_name=args.ec2_ssh_keypair,
                     block_device_map=device_map,
                     security_groups=[args.ec2_security_group])

    spot_req = ec2.request_spot_instances(bid, args.ec2_machine_image,
                                          **spot_args)[0]

    _L.info(
        'https://console.aws.amazon.com/ec2/v2/home?region=us-east-1#SpotInstances:search={}'
        .format(spot_req.id))

    #
    # Wait while EC2 does its thing, unless the user interrupts.
    #
    try:
        instance = wait_for_setup(spot_req, time() + 15 * 60)
        upload_tarball(tarball, instance.public_dns_name, args.identity_file,
                       time() + 3 * 60)
        wait_for_process(instance, time() + 12 * 60 * 60)

    except RuntimeError as e:
        _L.warning(e.message)

        if e.message is OVERDUE_PROCESS_ALL:
            # Instance was set up, but ran out of time. Get its log.
            logfile = join(tempdir, 'cloud-init-output.log')
            client = connect_ssh(instance.public_dns_name, args.identity_file)
            client.open_sftp().get('/var/log/cloud-init-output.log', logfile)

            with open(logfile) as file:
                _L.info(
                    '/var/log/cloud-init-output.log contents:\n\n{}\n'.format(
                        file.read()))

    finally:
        spot_req = ec2.get_all_spot_instance_requests(spot_req.id)[0]

        if spot_req.instance_id:
            print('Shutting down instance {} early'.format(
                spot_req.instance_id))
            ec2.terminate_instances(spot_req.instance_id)

        spot_req.cancel()
        rmtree(tempdir)
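
get_bid_amount is defined elsewhere in the project; a hedged sketch of one plausible strategy based on recent spot price history (purely illustrative, not the project's actual logic):

def naive_bid_amount(ec2, instance_type, multiplier=1.5):
    # Bid a multiple of the highest recently observed spot price for the
    # instance type; fall back to a token amount if no history is returned.
    history = ec2.get_spot_price_history(instance_type=instance_type,
                                         product_description='Linux/UNIX')
    highest = max(h.price for h in history) if history else 0.10
    return round(highest * multiplier, 4)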
Code example #48
0
def create_ami(region,
               snap_id,
               force=None,
               root_dev='/dev/sda1',
               zone_name=None,
               default_arch=None,
               default_type='t1.micro',
               security_groups=''):
    """
    Creates AMI image from given snapshot.

    Force option removes prompt request and creates new instance from
    created ami image.

    region, snap_id
        specify snapshot to be processed. Snapshot description in json
        format will be used to restore instance with same parameters.
        Will automatically process snapshots for the same instance taken
        within a short window (10 minutes or less), but for other devices
        (/dev/sdb, /dev/sdc, etc.);
    force
        Run instance from ami after creation without confirmation. To
        enable set value to "RUN";
    default_arch
        architecture to use if not mentioned in snapshot description;
    default_type
        instance type to use if not mentioned in snapshot description.
        Used only if ``force`` is "RUN";
    security_groups
        list of AWS Security Groups names formatted as string separated
        with semicolon ';'. Used only if ``force`` is "RUN".
    """
    conn = get_region_conn(region)
    snap = conn.get_all_snapshots(snapshot_ids=[
        snap_id,
    ])[0]
    instance_id = get_snap_instance(snap)
    _device = get_snap_device(snap)
    snaps = conn.get_all_snapshots(owner='self')
    snapshots = [
        snp for snp in snaps if get_snap_instance(snp) == instance_id
        and get_snap_device(snp) != _device and
        abs(get_snap_time(snap) - get_snap_time(snp)) <= timedelta(minutes=10)
    ]
    snapshot = sorted(snapshots, key=get_snap_time,
                      reverse=True) if snapshots else None
    # setup for building an EBS boot snapshot
    default_arch = default_arch or config.get('DEFAULT', 'ARCHITECTURE')
    arch = get_descr_attr(snap, 'Arch') or default_arch
    kernel = config.get(conn.region.name, 'KERNEL' + arch.upper())
    dev = re.match(r'^/dev/sda$', _device)  # if our instance encrypted
    if dev:
        kernel = config.get(conn.region.name, 'KERNEL_ENCR_' + arch.upper())
    ebs = EBSBlockDeviceType()
    ebs.snapshot_id = snap_id
    ebs.delete_on_termination = True
    block_map = BlockDeviceMapping()
    block_map[_device] = ebs
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    block_map['/dev/sdb'] = sdb

    if snapshot:
        for s in snapshot:
            s_dev = get_snap_device(s)
            s_ebs = EBSBlockDeviceType()
            s_ebs.delete_on_termination = True
            s_ebs.snapshot_id = s.id
            block_map[s_dev] = s_ebs

    name = 'Created {0} using access key {1}'.format(timestamp(),
                                                     conn.access_key)
    name = name.replace(":", ".").replace(" ", "_")

    # create the new AMI all options from snap JSON description:
    wait_for(snap, '100%', limit=SNAP_TIME)
    result = conn.register_image(
        name=name,
        description=snap.description,
        architecture=get_descr_attr(snap, 'Arch') or default_arch,
        root_device_name=get_descr_attr(snap, 'Root_dev_name') or root_dev,
        block_device_map=block_map,
        kernel_id=kernel)
    sleep(2)
    image = conn.get_all_images(image_ids=[
        result,
    ])[0]
    wait_for(image, 'available', limit=10 * 60)
    add_tags(image, snap.tags)

    logger.info('The new AMI ID = {0}'.format(result))

    info = ('\nEnter RUN if you want to launch instance using '
            'just created {0}: '.format(image))
    new_instance = None
    if force == 'RUN' or raw_input(info).strip() == 'RUN':
        instance_type = get_descr_attr(snap, 'Type') or default_type
        new_instance = launch_instance_from_ami(
            region,
            image.id,
            inst_type=instance_type,
            security_groups=security_groups,
            zone_name=zone_name)
    return image, new_instance
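
get_descr_attr pulls named fields (Arch, Type, Root_dev_name) out of the snapshot's JSON description, per the docstring above; a hedged sketch of such a helper:

import json

def get_descr_attr(snapshot, attr):
    # The snapshot description is assumed to be a JSON object holding keys
    # such as Arch, Type and Root_dev_name.
    try:
        return json.loads(snapshot.description).get(attr)
    except (TypeError, ValueError):
        return None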
Code example #49
0
File: api.py Project: velankanisys/eucalyptus
    def handleRunInstances(self, action, clc, user_data_file, callback):
        image_id = self.get_argument('ImageId')
        min = self.get_argument('MinCount', '1')
        max = self.get_argument('MaxCount', '1')
        key = self.get_argument('KeyName', None)
        groups = self.get_argument_list('SecurityGroup')
        sec_group_ids = self.get_argument_list('SecurityGroupId')
        if user_data_file:
            user_data = user_data_file
        else:
            user_data = self.get_argument('UserData', "")
            user_data = base64.b64decode(user_data)
        addr_type = self.get_argument('AddressingType', None)
        vm_type = self.get_argument('InstanceType', None)
        placement = self.get_argument('Placement.AvailabilityZone', None)
        placement_group = self.get_argument('Placement.GroupName', None)
        tenancy = self.get_argument('Placement.Tenancy', None)
        kernel = self.get_argument('KernelId', None)
        ramdisk = self.get_argument('RamdiskId', None)
        monitoring = False
        if self.get_argument('Monitoring.Enabled', '') == 'true':
            monitoring = True
        subnet = self.get_argument('SubnetId', None)
        private_ip = self.get_argument('PrivateIpAddress', None)
        # get block device mappings
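        # The EC2 query API flattens each mapping into numbered parameters:
        # BlockDeviceMapping.N.DeviceName, .VirtualName, .Ebs.SnapshotId,
        # .Ebs.VolumeSize and .Ebs.DeleteOnTermination.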
        bdm = BlockDeviceMapping()
        mapping = self.get_argument('BlockDeviceMapping.1.DeviceName', None)
        idx = 1
        while mapping:
            pre = 'BlockDeviceMapping.%d' % idx
            dev_name = mapping
            block_dev_type = BlockDeviceType()
            block_dev_type.ephemeral_name = self.get_argument(
                '%s.VirtualName' % pre, None)
            if not (block_dev_type.ephemeral_name):
                block_dev_type.no_device = \
                    (self.get_argument('%s.NoDevice' % pre, '') == 'true')
                block_dev_type.snapshot_id = \
                        self.get_argument('%s.Ebs.SnapshotId' % pre, None)
                block_dev_type.size = \
                        self.get_argument('%s.Ebs.VolumeSize' % pre, None)
                block_dev_type.delete_on_termination = \
                        (self.get_argument('%s.Ebs.DeleteOnTermination' % pre, '') == 'true')
            bdm[dev_name] = block_dev_type
            idx += 1
            mapping = self.get_argument(
                'BlockDeviceMapping.%d.DeviceName' % idx, None)
        if len(bdm) == 0:
            bdm = None

        api_termination = False
        if self.get_argument('DisableApiTermination', '') == 'true':
            api_termination = True
        instance_shutdown = False
        if self.get_argument('InstanceInitiatedShutdownBehavior',
                             '') == 'true':
            instance_shutdown = True
        token = self.get_argument('ClientToken', None)
        addition_info = self.get_argument('AdditionInfo', None)
        instance_profile_name = self.get_argument('IamInstanceProfile.Name',
                                                  None)
        instance_profile_arn = self.get_argument('IamInstanceProfile.Arn',
                                                 None)

        return clc.run_instances(
            image_id,
            min_count=min,
            max_count=max,
            key_name=key,
            security_groups=groups,
            user_data=user_data,
            addressing_type=addr_type,
            instance_type=vm_type,
            placement=placement,
            kernel_id=kernel,
            ramdisk_id=ramdisk,
            monitoring_enabled=monitoring,
            subnet_id=subnet,
            block_device_map=bdm,
            disable_api_termination=api_termination,
            instance_initiated_shutdown_behavior=instance_shutdown,
            private_ip_address=private_ip,
            placement_group=placement_group,
            client_token=token,
            security_group_ids=sec_group_ids,
            additional_info=addition_info,
            instance_profile_name=instance_profile_name,
            instance_profile_arn=instance_profile_arn,
            tenancy=tenancy,
            callback=callback)
コード例 #50
0
def create_ami(host_instance, options, config):
    connection = host_instance.connection
    env.host_string = host_instance.public_dns_name
    env.user = '******'
    env.abort_on_prompts = True
    env.disable_known_hosts = True

    target_name = options.config
    virtualization_type = config.get("virtualization_type")
    config_dir = "%s/%s" % (AMI_CONFIGS_DIR, target_name)
    dated_target_name = "%s-%s" % (
        options.config, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
    int_dev_name = config['target']['int_dev_name']
    mount_dev = int_dev_name
    mount_point = config['target']['mount_point']

    v = connection.create_volume(config['target']['size'],
                                 host_instance.placement)
    while True:
        try:
            v.attach(host_instance.id, config['target']['aws_dev_name'])
            break
        except:
            log.debug('hit error waiting for volume to be attached')
            time.sleep(10)

    while True:
        try:
            v.update()
            if v.status == 'in-use':
                if run('ls %s' % int_dev_name).succeeded:
                    break
        except:
            log.debug('hit error waiting for volume to be attached')
            time.sleep(10)

    # Step 0: install required packages
    if config.get('distro') not in ('debian', 'ubuntu'):
        run('which MAKEDEV >/dev/null || yum install -y MAKEDEV')
    # Step 1: prepare target FS
    run('mkdir -p %s' % mount_point)
    if virtualization_type == "hvm":
        # HVM-based instances use EBS disks as raw disks, so they have to be
        # partitioned first. Additionally, "1" has to be appended to get the
        # device name of the first primary partition.
        mount_dev = "%s1" % mount_dev
        run('parted -s %s -- mklabel msdos' % int_dev_name)
        run('parted -s -a optimal %s -- mkpart primary ext2 0 -1s' %
            int_dev_name)
        run('parted -s %s -- set 1 boot on' % int_dev_name)
    run('/sbin/mkfs.{fs_type} {dev}'.format(
        fs_type=config['target']['fs_type'], dev=mount_dev))
    run('/sbin/e2label {dev} {label}'.format(
        dev=mount_dev, label=config['target']['e2_label']))
    run('mount {dev} {mount_point}'.format(dev=mount_dev,
                                           mount_point=mount_point))
    run('mkdir {0}/dev {0}/proc {0}/etc'.format(mount_point))
    if config.get('distro') not in ('debian', 'ubuntu'):
        run('mount -t proc proc %s/proc' % mount_point)
        run('for i in console null zero ; '
            'do /sbin/MAKEDEV -d %s/dev -x $i ; done' % mount_point)

    # Step 2: install base system
    if config.get('distro') in ('debian', 'ubuntu'):
        run('apt-get update')
        run('which debootstrap >/dev/null || apt-get install -y debootstrap')
        run('debootstrap precise %s http://puppetagain.pub.build.mozilla.org/data/repos/apt/ubuntu/'
            % mount_point)
        run('chroot %s mount -t proc none /proc' % mount_point)
        run('mount -o bind /dev %s/dev' % mount_point)
        put('%s/releng-public.list' % AMI_CONFIGS_DIR,
            '%s/etc/apt/sources.list' % mount_point)
        with lcd(config_dir):
            put('usr/sbin/policy-rc.d',
                '%s/usr/sbin/' % mount_point,
                mirror_local_mode=True)
        run('chroot %s apt-get update' % mount_point)
        run('DEBIAN_FRONTEND=text chroot {mnt} apt-get install -y '
            'ubuntu-desktop openssh-server makedev curl grub {kernel}'.format(
                mnt=mount_point, kernel=config['kernel_package']))
        run('rm -f %s/usr/sbin/policy-rc.d' % mount_point)
        run('umount %s/dev' % mount_point)
        run('chroot %s ln -s /sbin/MAKEDEV /dev/' % mount_point)
        for dev in ('zero', 'null', 'console', 'generic'):
            run('chroot %s sh -c "cd /dev && ./MAKEDEV %s"' %
                (mount_point, dev))
        run('chroot %s apt-get clean' % mount_point)
    else:
        with lcd(config_dir):
            put('etc/yum-local.cfg', '%s/etc/yum-local.cfg' % mount_point)
            put('groupinstall', '/tmp/groupinstall')
            put('additional_packages', '/tmp/additional_packages')
        yum = 'yum -c {0}/etc/yum-local.cfg -y --installroot={0} '.format(
            mount_point)
        run('%s groupinstall "`cat /tmp/groupinstall`"' % yum)
        run('%s install `cat /tmp/additional_packages`' % yum)
        run('%s clean packages' % yum)
        # Rebuild RPM DB for cases when versions mismatch
        run('chroot %s rpmdb --rebuilddb || :' % mount_point)

    # Step 3: upload custom configuration files
    run('chroot %s mkdir -p /boot/grub' % mount_point)
    with lcd(config_dir):
        for f in ('etc/rc.local', 'etc/fstab', 'etc/hosts',
                  'etc/sysconfig/network',
                  'etc/sysconfig/network-scripts/ifcfg-eth0',
                  'etc/init.d/rc.local', 'boot/grub/device.map',
                  'etc/network/interfaces', 'boot/grub/menu.lst',
                  'boot/grub/grub.conf'):
            if os.path.exists(os.path.join(config_dir, f)):
                put(f, '%s/%s' % (mount_point, f), mirror_local_mode=True)
            else:
                log.warn("Skipping %s", f)

    # Step 4: tune configs
    run('sed -i -e s/@ROOT_DEV_LABEL@/{label}/g -e s/@FS_TYPE@/{fs}/g '
        '{mnt}/etc/fstab'.format(label=config['target']['e2_label'],
                                 fs=config['target']['fs_type'],
                                 mnt=mount_point))
    if config.get('distro') in ('debian', 'ubuntu'):
        # sanity check
        run('ls -l %s/boot/vmlinuz-%s' %
            (mount_point, config['kernel_version']))
        run('sed -i s/@VERSION@/%s/g %s/boot/grub/menu.lst' %
            (config['kernel_version'], mount_point))
    else:
        run('ln -s grub.conf %s/boot/grub/menu.lst' % mount_point)
        run('ln -s ../boot/grub/grub.conf %s/etc/grub.conf' % mount_point)
        if config.get('kernel_package') == 'kernel-PAE':
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}.PAE" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package',
                                         'kernel'), mount_point))
        else:
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package',
                                         'kernel'), mount_point))
        if virtualization_type == "hvm":
            # See https://bugs.archlinux.org/task/30241 for the details,
            # grub-install doesn't handle /dev/xvd* devices properly
            grub_install_patch = os.path.join(config_dir, "grub-install.diff")
            if os.path.exists(grub_install_patch):
                put(grub_install_patch, "/tmp/grub-install.diff")
                run('which patch >/dev/null || yum install -y patch')
                run('patch -p0 -i /tmp/grub-install.diff /sbin/grub-install')
            run("grub-install --root-directory=%s --no-floppy %s" %
                (mount_point, int_dev_name))

    run("sed -i -e '/PermitRootLogin/d' -e '/UseDNS/d' "
        "-e '$ a PermitRootLogin without-password' "
        "-e '$ a UseDNS no' "
        "%s/etc/ssh/sshd_config" % mount_point)

    if config.get('distro') in ('debian', 'ubuntu'):
        pass
    else:
        manage_service("network", mount_point, "on")
        manage_service("rc.local", mount_point, "on")

    run('umount %s/proc || :' % mount_point)
    run('umount %s' % mount_point)

    v.detach()
    while True:
        try:
            v.update()
            if v.status == 'available':
                break
        except:
            log.exception('hit error waiting for volume to be detached')
            time.sleep(10)

    # Step 5: Create a snapshot
    log.info('Creating a snapshot')
    snapshot = v.create_snapshot('EBS-backed %s' % dated_target_name)
    while True:
        try:
            snapshot.update()
            if snapshot.status == 'completed':
                break
        except:
            log.exception('hit error waiting for snapshot to be taken')
            time.sleep(10)
    snapshot.add_tag('Name', dated_target_name)

    # Step 6: Create an AMI
    log.info('Creating AMI')
    host_img = connection.get_image(config['ami'])
    block_map = BlockDeviceMapping()
    block_map[host_img.root_device_name] = BlockDeviceType(
        snapshot_id=snapshot.id)
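    # HVM images boot through their own bootloader, so no kernel (AKI) or
    # ramdisk (ARI) is attached; PV images reuse the host image's kernel
    # and ramdisk.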
    if virtualization_type == "hvm":
        kernel_id = None
        ramdisk_id = None
    else:
        kernel_id = host_img.kernel_id
        ramdisk_id = host_img.ramdisk_id

    ami_id = connection.register_image(
        dated_target_name,
        '%s EBS AMI' % dated_target_name,
        architecture=config['arch'],
        kernel_id=kernel_id,
        ramdisk_id=ramdisk_id,
        root_device_name=host_img.root_device_name,
        block_device_map=block_map,
        virtualization_type=virtualization_type,
    )
    while True:
        try:
            ami = connection.get_image(ami_id)
            ami.add_tag('Name', dated_target_name)
            if config["target"].get("tags"):
                for tag, value in config["target"]["tags"].items():
                    log.info("Tagging %s: %s", tag, value)
                    ami.add_tag(tag, value)
            log.info('AMI created')
            log.info('ID: {id}, name: {name}'.format(id=ami.id, name=ami.name))
            break
        except:
            log.info('Waiting for AMI')
            time.sleep(10)

    # Step 7: Cleanup
    if not options.keep_volume:
        log.info('Deleting volume')
        v.delete()
    if not options.keep_host_instance:
        log.info('Terminating host instance')
        host_instance.terminate()

    return ami
コード例 #51
0
ファイル: amazon_service.py プロジェクト: LiliDeng/lis-test
    def newest_image(self, conn, os_type=None):
        filters = {}
        if os_type == 'ubuntu_1604':
            filters = {
                'name':
                'ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server*',
                'root_device_type': 'ebs',
                'owner-id': '099720109477'
            }
            log.info("ubuntu_1604")
        elif os_type == 'ubuntu_1804':
            if self.instancetype == "m6g.4xlarge" or self.instancetype == "a1.4xlarge" or self.instancetype == "a1.metal":
                filters = {
                    'name':
                    'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-arm64-server*',
                    'root_device_type': 'ebs',
                    'owner-id': '099720109477'
                }
            else:
                filters = {
                    'name':
                    'ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server*',
                    'root_device_type': 'ebs',
                    'owner-id': '099720109477'
                }
            log.info("ubuntu_1804")
        elif os_type == 'amazon_linux':
            filters = {
                'name': 'amzn-ami-hvm-*-x86_64-gp2',
                'architecture': 'x86_64',
                'root_device_type': 'ebs'
            }
            log.info("amazon_linux")
        elif os_type == 'amazon_linux_gpu':
            filters = {
                'name': 'Deep Learning AMI (Amazon Linux) Version*',
                'architecture': 'x86_64',
                'root_device_type': 'ebs'
            }
            log.info("amazon_linux_gpu")
        else:
            log.info("os_type {} is not supported".format(os_type))
            return
        images = conn.get_all_images(filters=filters)
        filters_images = []
        for image in images:
            if image.platform != 'windows' and "test" not in image.name:
                filters_images.append(image)

        latest = None
        for image in filters_images:
            if not latest:
                latest = image
                continue
            if parser.parse(image.creationDate) > parser.parse(
                    latest.creationDate):
                latest = image

        root_device_name = latest.root_device_name
        if os_type == 'ubuntu_1604':
            self.device_map[root_device_name] = BlockDeviceType(
                delete_on_termination=True, size=30, volume_type="gp2")
            log.info("device_map ubuntu_1604")
        elif os_type == 'ubuntu_1804':
            self.device_map[root_device_name] = BlockDeviceType(
                delete_on_termination=True, size=30, volume_type="gp2")
            log.info("device_map ubuntu_1804")
        elif os_type == 'amazon_linux':
            self.device_map[root_device_name] = BlockDeviceType(
                delete_on_termination=True, size=30, volume_type="gp2")
            log.info("device_map amazon_linux")
        elif os_type == 'amazon_linux_gpu':
            self.device_map[root_device_name] = BlockDeviceType(
                delete_on_termination=True, size=75, volume_type="gp2")
            log.info("device_map amazon_linux_gpu")
        else:
            log.info("device_map {} is not supported".format(os_type))
        return latest
コード例 #52
0
def spawn_worker_instance():
    # Check that the user logged in is also authorized to do this
    if not current_user.is_authorized():
        return login_manager.unauthorized()

    errors = {}

    # Check required fields
    for f in ['name', 'token']:
        val = request.form[f]
        if val is None or val.strip() == '':
            errors[f] = "This field is required"

    # Check required file
    if not request.files['public-ssh-key']:
        errors['code'] = "Public key file is required"

    # Bug 961200: Check that a proper OpenSSH public key was uploaded.
    # It should start with "ssh-rsa AAAAB3"
    pubkey = request.files['public-ssh-key'].read()
    if not validate_public_key(pubkey):
        errors[
            'public-ssh-key'] = "Supplied file does not appear to be a valid OpenSSH public key."

    if errors:
        return get_worker_params(errors, request.form)

    # Upload s3 key to bucket
    sshkey = bucket.new_key("keys/%s.pub" % request.form['token'])
    sshkey.set_contents_from_string(pubkey)

    ephemeral = app.config.get("EPHEMERAL_MAP", None)
    # Create
    boot_script = render_template(
        'boot-script.sh',
        aws_region=app.config['AWS_REGION'],
        temporary_bucket=app.config['TEMPORARY_BUCKET'],
        ssh_key=sshkey.key,
        ephemeral_map=ephemeral)

    mapping = None
    if ephemeral:
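        # EPHEMERAL_MAP maps device names to instance-store volume names,
        # e.g. {'/dev/xvdb': 'ephemeral0'} (illustrative value).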
        mapping = BlockDeviceMapping()
        for device, eph_name in ephemeral.iteritems():
            mapping[device] = BlockDeviceType(ephemeral_name=eph_name)

    # Create EC2 instance
    reservation = ec2.run_instances(
        # ubuntu/images/hvm/ubuntu-vivid-15.04-amd64-server-20151006
        image_id='ami-2cfe1a1f',
        security_groups=app.config['SECURITY_GROUPS'],
        user_data=boot_script,
        block_device_map=mapping,
        instance_type=app.config['INSTANCE_TYPE'],
        instance_initiated_shutdown_behavior='terminate',
        client_token=request.form['token'],
        instance_profile_name=app.config['INSTANCE_PROFILE'])
    instance = reservation.instances[0]

    # Associate a few tags
    ec2.create_tags(
        [instance.id], {
            "Owner": current_user.email,
            "Name": request.form['name'],
            "Application": app.config['INSTANCE_APP_TAG']
        })

    # Send an email to the user who launched it
    params = {
        'monitoring_url': abs_url_for('monitor', instance_id=instance.id)
    }
    ses.send_email(
        source=app.config['EMAIL_SOURCE'],
        subject=("telemetry-analysis worker instance: %s (%s) launched" %
                 (request.form['name'], instance.id)),
        format='html',
        body=render_template('instance-launched-email.html', **params),
        to_addresses=[current_user.email])
    return redirect(url_for('monitor', instance_id=instance.id))
コード例 #53
0
def launch_cluster(conn, opts, cluster_name):

  #Remove known hosts to avoid "Offending key for IP ..." errors.
  known_hosts = os.environ['HOME'] + "/.ssh/known_hosts"
  if os.path.isfile(known_hosts):
    os.remove(known_hosts)
  if opts.key_pair is None:
      opts.key_pair = keypair()
      if opts.key_pair is None:
        print ( "ERROR: Must provide a key pair name (-k) to use on instances.", file=sys.stderr)
        sys.exit(1)

  if opts.profile is None:
    opts.profile = profile()
    if opts.profile is None:
      print ( "ERROR: No profile found on the current host. It must be provided with the -p option.", file=sys.stderr)
      sys.exit(1)

  public_key = pub_key()
  user_data = Template("""#!/bin/bash
  set -e -x
  echo '$public_key' >> ~root/.ssh/authorized_keys
  echo '$public_key' >> ~ec2-user/.ssh/authorized_keys""").substitute(public_key=public_key)

  print("Setting up security groups...")
  master_group = get_or_make_group(conn, cluster_name + "-master")
  slave_group = get_or_make_group(conn, cluster_name + "-slaves")

  security_group = os.popen("curl -s http://169.254.169.254/latest/meta-data/security-groups").read()

  sparknotebook_group = get_or_make_group(conn, security_group)
  if master_group.rules == []: # Group was just now created
    master_group.authorize(src_group=master_group)
    master_group.authorize(src_group=slave_group)
    master_group.authorize(src_group=sparknotebook_group)
    master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
    master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
    master_group.authorize('tcp', 18080, 18080, '0.0.0.0/0')
    master_group.authorize('tcp', 19999, 19999, '0.0.0.0/0')
    master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
    master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
    master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
    master_group.authorize('tcp', 4040, 4045, '0.0.0.0/0')
    master_group.authorize('tcp', 7077, 7077, '0.0.0.0/0')
    if opts.ganglia:
      master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0')
  if slave_group.rules == []: # Group was just now created
    slave_group.authorize(src_group=master_group)
    slave_group.authorize(src_group=slave_group)
    slave_group.authorize(src_group=sparknotebook_group)
    slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
    slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
    slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
    slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
    slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
    slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')

  if not any(r for r in sparknotebook_group.rules for g in r.grants if master_group.id == g.group_id):
    sparknotebook_group.authorize(ip_protocol="tcp", from_port="1", to_port="65535", src_group=master_group)
    sparknotebook_group.authorize(ip_protocol="icmp", from_port="-1", to_port="-1", src_group=master_group)

  if not any(r for r in sparknotebook_group.rules for g in r.grants if slave_group.id == g.group_id):
    sparknotebook_group.authorize(ip_protocol="tcp", from_port="1", to_port="65535", src_group=slave_group)
    sparknotebook_group.authorize(ip_protocol="icmp", from_port="-1", to_port="-1", src_group=slave_group)

  # Check if instances are already running in our groups
  existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
                                                           die_on_error=False)
  if existing_slaves or (existing_masters and not opts.use_existing_master):
    print (("ERROR: There are already instances running in " +
        "group %s or %s" % (master_group.name, slave_group.name)), file=sys.stderr)
    sys.exit(1)

  # Figure out Spark AMI
  if opts.ami is None:
    opts.ami = get_spark_ami(opts)
  print("Launching instances...")

  try:
    image = conn.get_all_images(image_ids=[opts.ami])[0]
  except:
    print ("Could not find AMI " + opts.ami, file=sys.stderr)
    sys.exit(1)

  # Create block device mapping so that we can add an EBS volume if asked to
  block_map = BlockDeviceMapping()
  if opts.ebs_vol_size > 0:
    device = EBSBlockDeviceType()
    device.size = opts.ebs_vol_size
    device.delete_on_termination = True
    block_map["/dev/sdv"] = device


  # AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
  if opts.instance_type.startswith('m3.'):
    for i in range(get_num_disks(opts.instance_type)):
      dev = BlockDeviceType()
      dev.ephemeral_name = 'ephemeral%d' % i
      # The first ephemeral drive is /dev/sdb.
      name = '/dev/sd' + string.ascii_letters[i + 1]
      block_map[name] = dev

  # Launch slaves
  if opts.spot_price != None:
    zones = get_zones(conn, opts)

    num_zones = len(zones)
    i = 0
    my_req_ids = []

    for zone in zones:
      best_price = find_best_price(conn,opts.instance_type,zone, opts.spot_price)
      # Launch spot instances with the requested price
      print(("Requesting %d slaves as spot instances with price $%.3f/hour each (total $%.3f/hour)" %
           (opts.slaves, best_price, opts.slaves * best_price)), file=sys.stderr)

      num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
      interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnetId(), groups=[slave_group.id], associate_public_ip_address=True)
      interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)

      slave_reqs = conn.request_spot_instances(
          price = best_price,
          image_id = opts.ami,
          launch_group = "launch-group-%s" % cluster_name,
          placement = zone,
          count = num_slaves_this_zone,
          key_name = opts.key_pair,
          instance_type = opts.instance_type,
          block_device_map = block_map,
          user_data = user_data,
          instance_profile_arn = opts.profile,
          network_interfaces = interfaces)
      my_req_ids += [req.id for req in slave_reqs]
      i += 1

    print ("Waiting for spot instances to be granted", file=sys.stderr)
    try:
      while True:
        time.sleep(10)
        reqs = conn.get_all_spot_instance_requests()
        id_to_req = {}
        for r in reqs:
          id_to_req[r.id] = r
        active_instance_ids = []
        for i in my_req_ids:
          if i in id_to_req and id_to_req[i].state == "active":
            active_instance_ids.append(id_to_req[i].instance_id)
        if len(active_instance_ids) == opts.slaves:
          print ("All %d slaves granted" % opts.slaves, file=sys.stderr)
          reservations = conn.get_all_instances(active_instance_ids)
          slave_nodes = []
          for r in reservations:
            slave_nodes += r.instances
          break
        else:
          # print >> stderr, ".",
          print("%d of %d slaves granted, waiting longer" % (
            len(active_instance_ids), opts.slaves))
    except:
      print("Canceling spot instance requests", file=sys.stderr)
      conn.cancel_spot_instance_requests(my_req_ids)
      # Log a warning if any of these requests actually launched instances:
      (master_nodes, slave_nodes) = get_existing_cluster(
          conn, opts, cluster_name, die_on_error=False)
      running = len(master_nodes) + len(slave_nodes)
      if running:
        print(("WARNING: %d instances are still running" % running), file=sys.stderr)
      sys.exit(0)
  else:
    # Launch non-spot instances
    zones = get_zones(conn, opts)
    num_zones = len(zones)
    i = 0
    slave_nodes = []
    for zone in zones:
      num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
      if num_slaves_this_zone > 0:
        slave_res = image.run(key_name = opts.key_pair,
                              security_group_ids = [slave_group.id],
                              instance_type = opts.instance_type,
                              subnet_id = subnetId(),
                              placement = zone,
                              min_count = num_slaves_this_zone,
                              max_count = num_slaves_this_zone,
                              block_device_map = block_map,
                              user_data = user_data,
                              instance_profile_arn = opts.profile)
        slave_nodes += slave_res.instances
        print("Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
                                                        zone, slave_res.id), file=sys.stderr)
      i += 1

  # Launch or resume masters
  if existing_masters:
    print("Starting master...")
    for inst in existing_masters:
      if inst.state not in ["shutting-down", "terminated"]:
        inst.start()
    master_nodes = existing_masters
  else:
    master_type = opts.master_instance_type
    if master_type == "":
      master_type = opts.instance_type
    if opts.zone == 'all':
      opts.zone = random.choice(conn.get_all_zones()).name
    if opts.spot_price != None:
      best_price = find_best_price(conn,master_type,opts.zone,opts.spot_price)
      # Launch spot instances with the requested price
      print(("Requesting master as spot instances with price $%.3f/hour" % (best_price)), file=sys.stderr)

      interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(subnet_id=subnetId(), groups=[master_group.id], associate_public_ip_address=True)
      interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(interface)

      master_reqs = conn.request_spot_instances(
        price = best_price,
        image_id = opts.ami,
        launch_group = "launch-group-%s" % cluster_name,
        placement = opts.zone,
        count = 1,
        key_name = opts.key_pair,
        instance_type = master_type,
        block_device_map = block_map,
        user_data = user_data,
        instance_profile_arn = opts.profile,
        network_interfaces = interfaces)
      my_req_ids = [r.id for r in master_reqs]
      print("Waiting for spot instance to be granted", file=sys.stderr)
      try:
        while True:
          time.sleep(10)
          reqs = conn.get_all_spot_instance_requests(request_ids=my_req_ids)
          id_to_req = {}
          for r in reqs:
            id_to_req[r.id] = r
          active_instance_ids = []
          for i in my_req_ids:
            #print(id_to_req[i].state, file=sys.stderr)
            if i in id_to_req and id_to_req[i].state == "active":
              active_instance_ids.append(id_to_req[i].instance_id)
          if len(active_instance_ids) == 1:
            print ( "Master granted", file=sys.stderr)
            reservations = conn.get_all_instances(active_instance_ids)
            master_nodes = []
            for r in reservations:
              master_nodes += r.instances
            break
          else:
            # print >> stderr, ".",
            print("%d of %d masters granted, waiting longer" % (
              len(active_instance_ids), 1))
      except:
        print("Canceling spot instance requests", file=sys.stderr)
        conn.cancel_spot_instance_requests(my_req_ids)
        # Log a warning if any of these requests actually launched instances:
        (master_nodes, slave_nodes) = get_existing_cluster(
            conn, opts, cluster_name, die_on_error=False)
        running = len(master_nodes) + len(slave_nodes)
        if running:
          print(("WARNING: %d instances are still running" % running), file=sys.stderr)
        sys.exit(0)
    else:
      master_res = image.run(key_name = opts.key_pair,
                             security_group_ids = [master_group.id],
                             instance_type = master_type,
                             subnet_id = subnetId(),
                             placement = opts.zone,
                             min_count = 1,
                             max_count = 1,
                             block_device_map = block_map,
                             user_data = user_data,
                             instance_profile_arn = opts.profile)
      master_nodes = master_res.instances
      print("Launched master in %s, regid = %s" % (opts.zone, master_res.id), file=sys.stderr)
  # Return all the instances
  return (master_nodes, slave_nodes)
コード例 #54
0
ファイル: abbey.py プロジェクト: ragutierrez/configuration
def create_instance_args():
    """
    Looks up security group, subnet
    and returns arguments to pass into
    ec2.run_instances() including
    user data
    """

    vpc = boto.vpc.connect_to_region(args.region)
    subnet = vpc.get_all_subnets(
        filters={
            'tag:aws:cloudformation:stack-name': stack_name,
            'tag:play': args.play}
    )

    if len(subnet) < 1:
        #
        # try scheme for non-cloudformation builds
        #

        subnet = vpc.get_all_subnets(
            filters={
                'tag:play': args.play,
                'tag:environment': args.environment,
                'tag:deployment': args.deployment}
        )

    if len(subnet) < 1:
        sys.stderr.write("ERROR: Expected at least one subnet, got {} for {}-{}-{}\n".format(
            len(subnet), args.environment, args.deployment, args.play))
        sys.exit(1)
    subnet_id = subnet[0].id
    vpc_id = subnet[0].vpc_id

    security_group_id = get_instance_sec_group(vpc_id)

    if args.identity:
        config_secure = 'true'
        with open(args.identity) as f:
            identity_contents = f.read()
    else:
        config_secure = 'false'
        identity_contents = "dummy"

    user_data = """#!/bin/bash
set -x
set -e
exec > >(tee /var/log/user-data.log|logger -t user-data -s 2>/dev/console) 2>&1
base_dir="/var/tmp/edx-cfg"
extra_vars="$base_dir/extra-vars-$$.yml"
secure_identity="$base_dir/secure-identity"
git_ssh="$base_dir/git_ssh.sh"
configuration_version="{configuration_version}"
configuration_secure_version="{configuration_secure_version}"
configuration_private_version="{configuration_private_version}"
configuration_internal_version="{configuration_internal_version}"
environment="{environment}"
deployment="{deployment}"
play="{play}"
cluster="{play}"
config_secure={config_secure}
git_repo_name="configuration"
git_repo="https://github.com/edx/$git_repo_name"
git_repo_secure="{configuration_secure_repo}"
git_repo_secure_name=$(basename $git_repo_secure .git)
git_repo_private="{configuration_private_repo}"
git_repo_private_name=$(basename $git_repo_private .git)
git_repo_internal="{configuration_internal_repo}"
git_repo_internal_name=$(basename $git_repo_internal .git)
secure_vars_file={secure_vars_file}
environment_deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{environment}-{deployment}.yml"
deployment_secure_vars="$base_dir/$git_repo_secure_name/ansible/vars/{deployment}.yml"
environment_deployment_internal_vars="$base_dir/$git_repo_internal_name/ansible/vars/{environment}-{deployment}.yml"
deployment_internal_vars="$base_dir/$git_repo_internal_name/ansible/vars/{deployment}.yml"
instance_id=\\
$(curl http://169.254.169.254/latest/meta-data/instance-id 2>/dev/null)
instance_ip=\\
$(curl http://169.254.169.254/latest/meta-data/local-ipv4 2>/dev/null)
instance_type=\\
$(curl http://169.254.169.254/latest/meta-data/instance-type 2>/dev/null)
playbook_dir="$base_dir/{playbook_dir}"

if $config_secure; then
    git_cmd="env GIT_SSH=$git_ssh git"
else
    git_cmd="git"
fi

ANSIBLE_ENABLE_SQS=true
SQS_NAME={queue_name}
SQS_REGION={region}
SQS_MSG_PREFIX="[ $instance_id $instance_ip $environment-$deployment $play ]"
PYTHONUNBUFFERED=1
HIPCHAT_TOKEN={hipchat_token}
HIPCHAT_ROOM={hipchat_room}
HIPCHAT_MSG_PREFIX="$environment-$deployment-$play: "
HIPCHAT_FROM="ansible-$instance_id"
HIPCHAT_MSG_COLOR=$(echo -e "yellow\\ngreen\\npurple\\ngray" | shuf | head -1)
DATADOG_API_KEY={datadog_api_key}
# environment for ansible
export ANSIBLE_ENABLE_SQS SQS_NAME SQS_REGION SQS_MSG_PREFIX PYTHONUNBUFFERED
export HIPCHAT_TOKEN HIPCHAT_ROOM HIPCHAT_MSG_PREFIX HIPCHAT_FROM
export HIPCHAT_MSG_COLOR DATADOG_API_KEY


#################################### Lifted from ansible-bootstrap.sh
if [[ -z "$ANSIBLE_REPO" ]]; then
  ANSIBLE_REPO="https://github.com/edx/ansible.git"
fi

if [[ -z "$ANSIBLE_VERSION" ]]; then
  ANSIBLE_VERSION="master"
fi

if [[ -z "$CONFIGURATION_REPO" ]]; then
  CONFIGURATION_REPO="https://github.com/edx/configuration.git"
fi

if [[ -z "$CONFIGURATION_VERSION" ]]; then
  CONFIGURATION_VERSION="master"
fi

if [[ -z "$UPGRADE_OS" ]]; then
  UPGRADE_OS=false
fi

#
# Bootstrapping constants
#
VIRTUAL_ENV_VERSION="15.0.2"
PIP_VERSION="8.1.2"
SETUPTOOLS_VERSION="24.0.3"
EDX_PPA="deb http://ppa.edx.org precise main"
EDX_PPA_KEY_SERVER="hkp://keyserver.ubuntu.com:80"
EDX_PPA_KEY_ID="B41E5E3969464050"

cat << EOF
******************************************************************************

Running the abbey with the following arguments:

ANSIBLE_REPO="$ANSIBLE_REPO"
ANSIBLE_VERSION="$ANSIBLE_VERSION"
CONFIGURATION_REPO="$CONFIGURATION_REPO"
CONFIGURATION_VERSION="$CONFIGURATION_VERSION"

******************************************************************************
EOF


if [[ $(id -u) -ne 0 ]] ;then
    echo "Please run as root";
    exit 1;
fi

if grep -q 'Precise Pangolin' /etc/os-release
then
    SHORT_DIST="precise"
elif grep -q 'Trusty Tahr' /etc/os-release
then
    SHORT_DIST="trusty"
elif grep -q 'Xenial Xerus' /etc/os-release
then
    SHORT_DIST="xenial"
else
    cat << EOF

    This script is only known to work on Ubuntu Precise, Trusty and Xenial,
    exiting.  If you are interested in helping make installation possible
    on other platforms, let us know.

EOF
   exit 1;
fi

EDX_PPA="deb http://ppa.edx.org $SHORT_DIST main"

# Upgrade the OS
apt-get update -y
apt-key update -y

if [ "$UPGRADE_OS" = true ]; then
    echo "Upgrading the OS..."
    apt-get upgrade -y
fi

# Required for add-apt-repository
apt-get install -y software-properties-common python-software-properties

# Add git PPA
add-apt-repository -y ppa:git-core/ppa

# For older distributions we need to install a PPA for Python 2.7.10
if [[ "precise" = "$SHORT_DIST" || "trusty" = "$SHORT_DIST" ]]; then

    # Add python PPA
    apt-key adv --keyserver "$EDX_PPA_KEY_SERVER" --recv-keys "$EDX_PPA_KEY_ID"
    add-apt-repository -y "$EDX_PPA"
fi

# Install python 2.7 latest, git and other common requirements
# NOTE: This will install the latest version of python 2.7 and
# which may differ from what is pinned in virtualenvironments
apt-get update -y

apt-get install -y python2.7 python2.7-dev python-pip python-apt python-yaml python-jinja2 build-essential sudo git-core libmysqlclient-dev libffi-dev libssl-dev

# Workaround for a 16.04 bug, need to upgrade to latest and then
# potentially downgrade to the preferred version.
# https://github.com/pypa/pip/issues/3862
if [[ "xenial" = "$SHORT_DIST" ]]; then
    pip install --upgrade pip
    pip install --upgrade pip=="$PIP_VERSION"
else
    pip install --upgrade pip=="$PIP_VERSION"
fi

# pip moves to /usr/local/bin when upgraded
hash -r   #pip may have moved from /usr/bin/ to /usr/local/bin/. This clears bash's path cache.
PATH=/usr/local/bin:$PATH
pip install setuptools=="$SETUPTOOLS_VERSION"
pip install virtualenv=="$VIRTUAL_ENV_VERSION"


##################### END Lifted from ansible-bootstrap.sh


# python3 is required for certain other things
# (currently xqwatcher so it can run python2 and 3 grader code,
# but potentially more in the future). It's not available on Ubuntu 12.04,
# but in those cases we don't need it anyways.
if [[ -n "$(apt-cache search --names-only '^python3-pip$')" ]]; then
    /usr/bin/apt-get update
    /usr/bin/apt-get install -y python3-pip python3-dev
fi

# this is missing on 14.04 (base package on 12.04)
# we need to do this on any build, since the above apt-get
# only runs on a build from scratch
/usr/bin/apt-get install -y python-httplib2 --force-yes

rm -rf $base_dir
mkdir -p $base_dir
cd $base_dir

cat << EOF > $git_ssh
#!/bin/sh
exec /usr/bin/ssh -o StrictHostKeyChecking=no -i "$secure_identity" "\$@"
EOF

chmod 755 $git_ssh

if $config_secure; then
    cat << EOF > $secure_identity
{identity_contents}
EOF
fi

cat << EOF >> $extra_vars
---
# extra vars passed into
# abbey.py including versions
# of all the repositories
{extra_vars_yml}

# abbey will always run fake migrations
# this is so that the application can come
# up healthy
fake_migrations: true

disable_edx_services: true
COMMON_TAG_EC2_INSTANCE: true

# abbey should never take instances in
# and out of elbs
elb_pre_post: false
EOF

chmod 400 $secure_identity

$git_cmd clone $git_repo $git_repo_name
cd $git_repo_name
$git_cmd checkout $configuration_version
cd $base_dir

if $config_secure; then
    $git_cmd clone $git_repo_secure $git_repo_secure_name
    cd $git_repo_secure_name
    $git_cmd checkout $configuration_secure_version
    cd $base_dir
fi

if [[ ! -z $git_repo_private ]]; then
    $git_cmd clone $git_repo_private $git_repo_private_name
    cd $git_repo_private_name
    $git_cmd checkout $configuration_private_version
    cd $base_dir
fi

if [[ ! -z $git_repo_internal ]]; then
    $git_cmd clone $git_repo_internal $git_repo_internal_name
    cd $git_repo_internal_name
    $git_cmd checkout $configuration_internal_version
    cd $base_dir
fi


cd $base_dir/$git_repo_name
sudo pip install -r pre-requirements.txt
sudo pip install -r requirements.txt

cd $playbook_dir

if [[ -r "$deployment_internal_vars" ]]; then
    extra_args_opts+=" -e@$deployment_internal_vars"
fi

if [[ -r "$environment_deployment_internal_vars" ]]; then
    extra_args_opts+=" -e@$environment_deployment_internal_vars"
fi

if [[ -r "$deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$deployment_secure_vars"
fi

if [[ -r "$environment_deployment_secure_vars" ]]; then
    extra_args_opts+=" -e@$environment_deployment_secure_vars"
fi

if $secure_vars_file; then
    extra_args_opts+=" -e@$secure_vars_file"
fi

extra_args_opts+=" -e@$extra_vars"

ansible-playbook -vvvv -c local -i "localhost," $play.yml $extra_args_opts
ansible-playbook -vvvv -c local -i "localhost," stop_all_edx_services.yml $extra_args_opts

rm -rf $base_dir

    """.format(
                hipchat_token=args.hipchat_api_token,
                hipchat_room=args.ansible_hipchat_room_id,
                configuration_version=args.configuration_version,
                configuration_secure_version=args.configuration_secure_version,
                configuration_secure_repo=args.configuration_secure_repo,
                configuration_private_version=args.configuration_private_version,
                configuration_private_repo=args.configuration_private_repo,
                configuration_internal_version=args.configuration_internal_version,
                configuration_internal_repo=args.configuration_internal_repo,
                environment=args.environment,
                deployment=args.deployment,
                play=args.play,
                playbook_dir=args.playbook_dir,
                config_secure=config_secure,
                identity_contents=identity_contents,
                queue_name=run_id,
                extra_vars_yml=extra_vars_yml,
                secure_vars_file=secure_vars_file,
                cache_id=args.cache_id,
                datadog_api_key=args.datadog_api_key,
                region=args.region)

    mapping = BlockDeviceMapping()
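    # Override the AMI's root volume with a general-purpose SSD (gp2) volume
    # sized by args.root_vol_size.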
    root_vol = BlockDeviceType(size=args.root_vol_size,
                               volume_type='gp2')
    mapping['/dev/sda1'] = root_vol

    ec2_args = {
        'security_group_ids': [security_group_id],
        'subnet_id': subnet_id,
        'key_name': args.keypair,
        'image_id': base_ami,
        'instance_type': args.instance_type,
        'instance_profile_name': args.role_name,
        'user_data': user_data,
        'block_device_map': mapping,
    }

    return ec2_args
コード例 #55
0
ファイル: create-ebs-amis.py プロジェクト: nbp/nixos-1
    print >> sys.stderr, "created snapshot {0}".format(snapshot.id)

    charon.util.check_wait(check, max_tries=120)

    m._conn.create_tags([snapshot.id], {'Name': ami_name})

    if not args.keep: depl.destroy_vms()

    # Register the image.
    aki = m._conn.get_all_images(
        filters={'manifest-location': '*pv-grub-hd0_1.03-x86_64*'})[0]
    print >> sys.stderr, "using kernel image {0} - {1}".format(
        aki.id, aki.location)

    block_map = BlockDeviceMapping()
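    # Root EBS volume restored from the snapshot plus four instance-store
    # (ephemeral) mappings; instance types expose only as many ephemeral
    # drives as they actually have.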
    block_map['/dev/sda'] = BlockDeviceType(snapshot_id=snapshot.id,
                                            delete_on_termination=True)
    block_map['/dev/sdb'] = BlockDeviceType(ephemeral_name="ephemeral0")
    block_map['/dev/sdc'] = BlockDeviceType(ephemeral_name="ephemeral1")
    block_map['/dev/sdd'] = BlockDeviceType(ephemeral_name="ephemeral2")
    block_map['/dev/sde'] = BlockDeviceType(ephemeral_name="ephemeral3")

    ami_id = m._conn.register_image(name=ami_name,
                                    description=description,
                                    architecture="x86_64",
                                    root_device_name="/dev/sda",
                                    kernel_id=aki.id,
                                    block_device_map=block_map)

print >> sys.stderr, "registered AMI {0}".format(ami_id)

time.sleep(5)
コード例 #56
0
ファイル: ec2_cloud_ops.py プロジェクト: 5thphase/cbtool
    def vmcreate(self, obj_attr_list) :
        '''
        TBD
        '''
        try :
            _status = 100
            _fmsg = "An error has occurred, but no error message was captured"
            
            _instance = False
            _reservation = False
            
            self.determine_instance_name(obj_attr_list)            
            self.determine_key_name(obj_attr_list)

            obj_attr_list["last_known_state"] = "about to connect to " + self.get_description() + " manager"

            self.take_action_if_requested("VM", obj_attr_list, "provision_originated")

            if not self.ec2conn :
                self.connect(obj_attr_list["access"], obj_attr_list["credentials"], \
                             obj_attr_list["vmc_name"])

            if self.is_vm_running(obj_attr_list) :
                _msg = "An instance named \"" + obj_attr_list["cloud_vm_name"]
                _msg += "\" is already running. It needs to be destroyed first."
                _status = 187
                cberr(_msg)
                raise CldOpsException(_msg, _status)

            # "Security groups" must be a list
            _security_groups = []
            _security_groups.append(obj_attr_list["security_groups"])

            _time_mark_prs = int(time())
            obj_attr_list["mgt_002_provisioning_request_sent"] = _time_mark_prs - int(obj_attr_list["mgt_001_provisioning_request_originated"])

            self.vm_placement(obj_attr_list)

            obj_attr_list["last_known_state"] = "about to send create request"
            
            self.get_images(obj_attr_list)
            self.get_networks(obj_attr_list)

            obj_attr_list["config_drive"] = False

            # We need the instance placement information before creating the actual volume
            #self.vvcreate(obj_attr_list)

            if "cloud_rv_type" not in obj_attr_list :
                obj_attr_list["cloud_rv_type"] = "standard"

            _bdm = BlockDeviceMapping()
            '''
            EBS volume type options:
            gp2 (general-purpose SSD)
            io1 (provisioned-IOPS SSD)
            st1 (throughput-optimized HDD)
            sc1 (cold HDD)
            standard (magnetic)
            '''

            if obj_attr_list["cloud_rv_iops"] == "0":
                _iops = None
            else:
                _iops = obj_attr_list["cloud_rv_iops"]

            if "cloud_rv" in obj_attr_list and obj_attr_list["cloud_rv"] != "0":
                _size = obj_attr_list["cloud_rv"]
            else:
                _size = None

            _bdm['/dev/sda1'] = BlockDeviceType(volume_type = obj_attr_list["cloud_rv_type"], delete_on_termination=True, iops=_iops, size=_size)
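            # Note: EC2 only honours an explicit iops value for io1
            # (provisioned-IOPS) volumes.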

            self.common_messages("VM", obj_attr_list, "creating", 0, '')

            self.pre_vmcreate_process(obj_attr_list)
            _reservation = self.ec2conn.run_instances(image_id = obj_attr_list["boot_volume_imageid1"], \
                                                      instance_type = obj_attr_list["size"], \
                                                      key_name = obj_attr_list["key_name"], \
                                                      user_data = self.populate_cloudconfig(obj_attr_list),
                                                      block_device_map = _bdm,
                                                      security_groups = _security_groups)

            if _reservation :

                sleep(int(obj_attr_list["update_frequency"]))
                
                _instance = _reservation.instances[0]
        
                _instance.add_tag("Name", obj_attr_list["cloud_vm_name"])            
                
                obj_attr_list["cloud_vm_uuid"] = '{0}'.format(_instance.id)
                obj_attr_list["instance_obj"] = _instance

                self.vvcreate(obj_attr_list)

                self.take_action_if_requested("VM", obj_attr_list, "provision_started")

                _time_mark_prc = self.wait_for_instance_ready(obj_attr_list, _time_mark_prs)

                if obj_attr_list["cloud_vv_instance"] :
                    self.common_messages("VV", obj_attr_list, "attaching", _status, _fmsg)
                    obj_attr_list["cloud_vv_instance"].attach(obj_attr_list["cloud_vm_uuid"], "/dev/xvdc")
                
                self.wait_for_instance_boot(obj_attr_list, _time_mark_prc)

                obj_attr_list["host_name"] = "unknown"

                self.take_action_if_requested("VM", obj_attr_list, "provision_finished")
                    
                _status = 0

                if obj_attr_list["force_failure"].lower() == "true" :
                    _fmsg = "Forced failure (option FORCE_FAILURE set \"true\")"                    
                    _status = 916

        except CldOpsException as obj :
            _status = obj.status
            _fmsg = str(obj.msg)

        except AWSException as obj :
            _status = int(obj.error_code)
            _fmsg = str(obj.error_message)

        except Exception as msg :
            _fmsg = str(msg)
            _status = 23
    
        finally :
            if _status and _reservation is not False :
                cbdebug("Error after VM creation. Cleanup...", True)
                self.vmdestroy_repeat(obj_attr_list)

            self.post_vmboot_process(obj_attr_list)
            _status, _msg = self.common_messages("VM", obj_attr_list, "created", _status, _fmsg)
            return _status, _msg
コード例 #57
0
def create_ami(host_instance, args, config, instance_config, ssh_key,
               key_filename, instance_data, deploypass, cert, pkey,
               ami_name_prefix):
    connection = host_instance.connection
    setup_fabric_env(instance=host_instance,
                     abort_on_prompts=True,
                     disable_known_hosts=True,
                     key_filename=key_filename)

    target_name = args.config
    virtualization_type = config.get("virtualization_type")
    config_dir = "%s/%s" % (AMI_CONFIGS_DIR, target_name)
    if ami_name_prefix:
        prefix = ami_name_prefix
    else:
        prefix = args.config
    dated_target_name = "{}-{}".format(
        prefix, time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))

    if config.get('distro') in ('debian', 'ubuntu'):
        ubuntu_release = config.get("release", "precise")
    int_dev_name = config['target']['int_dev_name']
    mount_dev = int_dev_name
    grub_dev = int_dev_name
    mount_point = config['target']['mount_point']
    boot_mount_dev = None
    host_packages_file = os.path.join(config_dir, "host_packages")
    packages_file = os.path.join(config_dir, "packages")
    if os.path.exists(host_packages_file):
        install_packages(host_packages_file, config.get('distro'))

    v = attach_and_wait(host_instance, config['target']['size'],
                        config['target']['aws_dev_name'], int_dev_name)

    # Step 0: install required packages
    if config.get('distro') == "centos":
        run('which MAKEDEV >/dev/null || yum -d 1 install -y MAKEDEV')

    # Step 1: prepare target FS
    run('mkdir -p %s' % mount_point)
    if config.get("root_device_type") == "instance-store":
        # Use file image
        mount_dev = "/dev/cloud_root/lv_root"
        grub_dev = "/dev/loop0"
        boot_mount_dev = "/dev/mapper/loop0p1"
        img_file = dated_target_name
        partition_image(mount_dev=mount_dev,
                        int_dev_name=int_dev_name,
                        img_file=img_file)

    elif virtualization_type == "hvm":
        # use EBS volume
        mount_dev = "/dev/cloud_root/lv_root"
        boot_mount_dev = "%s1" % int_dev_name
        partition_ebs_volume(int_dev_name=int_dev_name)

    run('/sbin/mkfs.{fs_type} {args} {dev}'.format(
        fs_type=config['target']['fs_type'],
        args=config['target'].get("mkfs_args", ""),
        dev=mount_dev))
    run('/sbin/e2label {dev} {label}'.format(
        dev=mount_dev, label=config['target']['e2_label']))
    run('mount {dev} {mount_point}'.format(dev=mount_dev,
                                           mount_point=mount_point))
    run('mkdir {0}/dev {0}/proc {0}/etc {0}/boot {0}/sys'.format(mount_point))
    run('mount -t sysfs sys %s/sys' % mount_point)

    if config.get('distro') not in ('debian', 'ubuntu'):
        run('mount -t proc proc %s/proc' % mount_point)
        run('for i in console null zero random urandom; '
            'do /sbin/MAKEDEV -d %s/dev -x $i ; done' % mount_point)
    if boot_mount_dev:
        run('mount {} {}/boot'.format(boot_mount_dev, mount_point))

    # Step 2: install base system
    if config.get('distro') in ('debian', 'ubuntu'):
        run("debootstrap %s %s "
            "http://puppet/repos/apt/ubuntu/" % (ubuntu_release, mount_point))
        run('chroot %s mount -t proc none /proc' % mount_point)
        run('mount -o bind /dev %s/dev' % mount_point)
        put('%s/releng-public-%s.list' % (AMI_CONFIGS_DIR, ubuntu_release),
            '%s/etc/apt/sources.list' % mount_point)
        with lcd(config_dir):
            put('usr/sbin/policy-rc.d',
                '%s/usr/sbin/' % mount_point,
                mirror_local_mode=True)
        install_packages(packages_file,
                         config.get('distro'),
                         chroot=mount_point)
    else:
        with lcd(config_dir):
            put('etc/yum-local.cfg', '%s/etc/yum-local.cfg' % mount_point)
        yum = 'yum -d 1 -c {0}/etc/yum-local.cfg -y --installroot={0} '.format(
            mount_point)
        # this groupinstall emulates the %packages section of the kickstart
        # config, which defaults to Core and Base.
        run('%s groupinstall Core Base' % yum)
        run('%s clean packages' % yum)
        # Rebuild RPM DB for cases when versions mismatch
        run('chroot %s rpmdb --rebuilddb || :' % mount_point)

    # Step 3: upload custom configuration files
    run('chroot %s mkdir -p /boot/grub' % mount_point)
    for directory in ('boot', 'etc', 'usr'):
        local_directory = os.path.join(config_dir, directory)
        remote_directory = os.path.join(mount_point, directory)
        if not os.path.exists(local_directory):
            continue

        sync(local_directory, remote_directory)

    # Step 4: tune configs
    run('sed -i -e s/@ROOT_DEV_LABEL@/{label}/g -e s/@FS_TYPE@/{fs}/g '
        '{mnt}/etc/fstab'.format(label=config['target']['e2_label'],
                                 fs=config['target']['fs_type'],
                                 mnt=mount_point))
    if config.get('distro') in ('debian', 'ubuntu'):
        if virtualization_type == "hvm":
            run("chroot {mnt} grub-install {int_dev_name}".format(
                mnt=mount_point, int_dev_name=int_dev_name))
            run("chroot {mnt} update-grub".format(mnt=mount_point))
        else:
            run("chroot {mnt} update-grub -y".format(mnt=mount_point))
            run("sed  -i 's/^# groot.*/# groot=(hd0)/g' "
                "{mnt}/boot/grub/menu.lst".format(mnt=mount_point))
            run("chroot {mnt} update-grub".format(mnt=mount_point))
    else:
        run('ln -s grub.conf %s/boot/grub/menu.lst' % mount_point)
        run('ln -s ../boot/grub/grub.conf %s/etc/grub.conf' % mount_point)
        if config.get('kernel_package') == 'kernel-PAE':
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}.PAE" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package',
                                         'kernel'), mount_point))
        else:
            run('sed -i s/@VERSION@/`chroot %s rpm -q '
                '--queryformat "%%{version}-%%{release}.%%{arch}" '
                '%s | tail -n1`/g %s/boot/grub/grub.conf' %
                (mount_point, config.get('kernel_package',
                                         'kernel'), mount_point))
        if config.get("root_device_type") == "instance-store":
            # files normally copied by grub-install
            run("cp -va /usr/share/grub/x86_64-redhat/* /mnt/boot/grub/")
            put(os.path.join(config_dir, "grub.cmd"), "/tmp/grub.cmd")
            run("sed -i s/@IMG@/{}/g /tmp/grub.cmd".format(img_file))
            run("cat /tmp/grub.cmd | grub --device-map=/dev/null")
        elif virtualization_type == "hvm":
            # See https://bugs.archlinux.org/task/30241 for the details,
            # grub-install doesn't handle /dev/xvd* devices properly
            grub_install_patch = os.path.join(config_dir, "grub-install.diff")
            if os.path.exists(grub_install_patch):
                put(grub_install_patch, "/tmp/grub-install.diff")
                run('which patch >/dev/null || yum -d 1 install -y patch')
                run('patch -p0 -i /tmp/grub-install.diff /sbin/grub-install')
            run("grub-install --root-directory=%s --no-floppy %s" %
                (mount_point, grub_dev))

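    # Adjust sshd in the image: drop any existing PermitRootLogin/UseDNS lines,
    # then allow key-only root login and disable reverse DNS lookups.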
    run("sed -i -e '/PermitRootLogin/d' -e '/UseDNS/d' "
        "-e '$ a PermitRootLogin without-password' "
        "-e '$ a UseDNS no' "
        "%s/etc/ssh/sshd_config" % mount_point)

    if config.get('distro') not in ('debian', 'ubuntu'):
        manage_service("network", mount_point, "on")
        manage_service("rc.local", mount_point, "on")

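    # For instance-store CentOS builds, configure the image in place inside the
    # chroot (assimilation and cleanup) before it is unmounted and bundled.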
    if config.get("root_device_type") == "instance-store" and \
            config.get("distro") == "centos":
        instance_data = instance_data.copy()
        instance_data['name'] = host_instance.tags.get("Name")
        instance_data['hostname'] = host_instance.tags.get("FQDN")
        run("cp /etc/resolv.conf {}/etc/resolv.conf".format(mount_point))
        # make puppet happy
        # disable ipv6
        run("/sbin/service ip6tables stop")
        # mount /dev to let sshd start
        run('mount -o bind /dev %s/dev' % mount_point)
        assimilate_instance(host_instance,
                            instance_config,
                            ssh_key,
                            instance_data,
                            deploypass,
                            chroot=mount_point,
                            reboot=False)
        ami_cleanup(mount_point=mount_point, distro=config["distro"])
        # kill chroot processes
        put('%s/kill_chroot.sh' % AMI_CONFIGS_DIR, '/tmp/kill_chroot.sh')
        run('bash /tmp/kill_chroot.sh {}'.format(mount_point))
        run('swapoff -a')
    run('umount %s/dev || :' % mount_point)
    if config.get("distro") == "ubuntu":
        run('rm -f %s/usr/sbin/policy-rc.d' % mount_point)
        run('chroot %s ln -s /sbin/MAKEDEV /dev/' % mount_point)
        for dev in ('zero', 'null', 'console', 'generic'):
            run('chroot %s sh -c "cd /dev && ./MAKEDEV %s"' %
                (mount_point, dev))
    run('umount %s/sys || :' % mount_point)
    run('umount %s/proc || :' % mount_point)
    run('umount %s/dev  || :' % mount_point)
    run('umount %s/boot || :' % mount_point)
    run('umount %s' % mount_point)
    if config.get("root_device_type") == "instance-store" \
            and config.get("distro") == "centos":
        # create bundle
        run("yum -d 1 install -y ruby "
            "http://s3.amazonaws.com/ec2-downloads/ec2-ami-tools.noarch.rpm")
        bundle_location = "{b}/{d}/{t}/{n}".format(
            b=config["bucket"],
            d=config["bucket_dir"],
            t=config["target"]["tags"]["moz-type"],
            n=dated_target_name)
        manifest_location = "{}/{}.manifest.xml".format(
            bundle_location, dated_target_name)
        run("mkdir -p /mnt-tmp/out")
        put(cert, "/mnt-tmp/cert.pem")
        put(pkey, "/mnt-tmp/pk.pem")
        run("ec2-bundle-image -c /mnt-tmp/cert.pem -k /mnt-tmp/pk.pem "
            "-u {uid} -i /mnt-tmp/{img_file} -d /mnt-tmp/out -r x86_64".format(
                img_file=img_file, uid=config["aws_user_id"]))

        with hide('running', 'stdout', 'stderr'):
            log.info("uploading bundle")
            run("ec2-upload-bundle -b {bundle_location}"
                " --access-key {access_key} --secret-key {secret_key}"
                " --region {region}"
                " -m /mnt-tmp/out/{img_file}.manifest.xml  --retry".format(
                    bundle_location=bundle_location,
                    access_key=boto.config.get("Credentials",
                                               "aws_access_key_id"),
                    secret_key=boto.config.get("Credentials",
                                               "aws_secret_access_key"),
                    region=connection.region.name,
                    img_file=img_file))

    v.detach(force=True)
    wait_for_status(v, "status", "available", "update")
    if not config.get("root_device_type") == "instance-store":
        # Step 5: Create a snapshot
        log.info('Creating a snapshot')
        snapshot = v.create_snapshot(dated_target_name)
        wait_for_status(snapshot, "status", "completed", "update")
        snapshot.add_tag('Name', dated_target_name)
        snapshot.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))

    # Step 6: Create an AMI
    log.info('Creating AMI')
    if config.get("root_device_type") == "instance-store":
        ami_id = connection.register_image(
            dated_target_name,
            '%s AMI' % dated_target_name,
            architecture=config['arch'],
            virtualization_type=virtualization_type,
            image_location=manifest_location,
        )
    else:
        host_img = connection.get_image(config['ami'])
        block_map = BlockDeviceMapping()
        block_map[host_img.root_device_name] = BlockDeviceType(
            snapshot_id=snapshot.id)
        root_device_name = host_img.root_device_name
        if virtualization_type == "hvm":
            kernel_id = None
            ramdisk_id = None
        else:
            kernel_id = host_img.kernel_id
            ramdisk_id = host_img.ramdisk_id

        ami_id = connection.register_image(
            dated_target_name,
            '%s AMI' % dated_target_name,
            architecture=config['arch'],
            kernel_id=kernel_id,
            ramdisk_id=ramdisk_id,
            root_device_name=root_device_name,
            block_device_map=block_map,
            virtualization_type=virtualization_type,
        )
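    # Tag the freshly registered AMI; registration is asynchronous, so keep
    # retrying until the image is visible.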
    while True:
        try:
            ami = connection.get_image(ami_id)
            ami.add_tag('Name', dated_target_name)
            ami.add_tag('moz-created', str(int(time.mktime(time.gmtime()))))
            if config["target"].get("tags"):
                for tag, value in config["target"]["tags"].items():
                    log.info("Tagging %s: %s", tag, value)
                    ami.add_tag(tag, value)
            log.info('AMI created')
            log.info('ID: {id}, name: {name}'.format(id=ami.id, name=ami.name))
            break
        except Exception:
            log.info('Waiting for AMI')
            time.sleep(10)

    # Step 7: Cleanup
    if not args.keep_volume:
        log.info('Deleting volume')
        v.delete()
    if not args.keep_host_instance:
        log.info('Terminating host instance')
        host_instance.terminate()

    return ami
Code example #58
0
def test_create_launch_configuration_with_block_device_mappings():
    block_device_mapping = BlockDeviceMapping()

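    # instance-store (ephemeral) volume mapped to /dev/xvdb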
    ephemeral_drive = BlockDeviceType()
    ephemeral_drive.ephemeral_name = 'ephemeral0'
    block_device_mapping['/dev/xvdb'] = ephemeral_drive

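    # standard EBS volume restored from a snapshot, mapped to /dev/xvdp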
    snapshot_drive = BlockDeviceType()
    snapshot_drive.snapshot_id = "snap-1234abcd"
    snapshot_drive.volume_type = "standard"
    block_device_mapping['/dev/xvdp'] = snapshot_drive

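    # provisioned-IOPS (io1) EBS volume on /dev/xvdh, kept on termination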
    ebs_drive = BlockDeviceType()
    ebs_drive.volume_type = "io1"
    ebs_drive.size = 100
    ebs_drive.iops = 1000
    ebs_drive.delete_on_termination = False
    block_device_mapping['/dev/xvdh'] = ebs_drive

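    # register a launch configuration that carries all three mappings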
    conn = boto.connect_autoscale(use_block_device_types=True)
    config = LaunchConfiguration(
        name='tester',
        image_id='ami-abcd1234',
        instance_type='m1.small',
        key_name='the_keys',
        security_groups=["default", "default2"],
        user_data=b"This is some user_data",
        instance_monitoring=True,
        instance_profile_name=(
            'arn:aws:iam::123456789012:instance-profile/testing'),
        spot_price=0.1,
        block_device_mappings=[block_device_mapping])
    conn.create_launch_configuration(config)

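    # read the launch configuration back and check that every field round-trips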
    launch_config = conn.get_all_launch_configurations()[0]
    launch_config.name.should.equal('tester')
    launch_config.image_id.should.equal('ami-abcd1234')
    launch_config.instance_type.should.equal('m1.small')
    launch_config.key_name.should.equal('the_keys')
    set(launch_config.security_groups).should.equal(
        set(['default', 'default2']))
    launch_config.user_data.should.equal(b"This is some user_data")
    launch_config.instance_monitoring.enabled.should.equal('true')
    launch_config.instance_profile_name.should.equal(
        'arn:aws:iam::123456789012:instance-profile/testing')
    launch_config.spot_price.should.equal(0.1)
    len(launch_config.block_device_mappings).should.equal(3)

    returned_mapping = launch_config.block_device_mappings

    set(returned_mapping.keys()).should.equal(
        set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh']))

    returned_mapping['/dev/xvdh'].iops.should.equal(1000)
    returned_mapping['/dev/xvdh'].size.should.equal(100)
    returned_mapping['/dev/xvdh'].volume_type.should.equal("io1")
    returned_mapping['/dev/xvdh'].delete_on_termination.should.be.false

    returned_mapping['/dev/xvdp'].snapshot_id.should.equal("snap-1234abcd")
    returned_mapping['/dev/xvdp'].volume_type.should.equal("standard")

    returned_mapping['/dev/xvdb'].ephemeral_name.should.equal('ephemeral0')
Code example #59
0
    def test_startElement_with_name_virtualName_sets_and_returns_current_value(
            self):
        retval = self.block_device_mapping.startElement(
            "virtualName", None, None)
        assert self.block_device_type_eq(
            retval, BlockDeviceType(self.block_device_mapping))
Code example #60
0
File: ec2_launch_v1.0.py  Project: zeus911/cloud
## wangfei 2014-12-18
import requests
import time
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType
from boto.ec2.networkinterface import NetworkInterfaceSpecification, NetworkInterfaceCollection

ec2_region = 'ap-southeast-1'
ami_id = 'ami-d6e7c084' # Ubuntu Server 14.04 LTS (HVM)
instance_type = 't2.micro'
subnet_id = 'subnet-xxxxxxx'
groups = ['sg-xxxxxx']
tags = {'Name':'wangfei-test','PROJECT':'test'}
key_name = 'xxxxx-test-20141218'
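# root EBS volume: 100 GB on /dev/sda1, deleted when the instance terminates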
block_device_map = BlockDeviceMapping()
block_dev_type = BlockDeviceType(delete_on_termination=True, size=100)
block_device_map['/dev/sda1'] = block_dev_type
user_data = "sudo apt-get -y install htop"

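# network interface in the target subnet with an auto-assigned public IP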
networks = NetworkInterfaceSpecification(
    subnet_id=subnet_id,
    groups=groups,
    associate_public_ip_address=True)

network_interfaces = NetworkInterfaceCollection(networks)

conn = boto.ec2.connect_to_region(ec2_region)
reservation = conn.run_instances(
    ami_id,
    key_name = key_name,
    network_interfaces = network_interfaces,