Example #1
    def build_block_device_map(ephemeral,
                               number_ephemeral_disks=1,
                               ebs_size=None,
                               iops=None,
                               number_ebs_volumes=1):
        bdm = blockdevicemapping.BlockDeviceMapping()

        if ephemeral:
            # The ephemeral disk
            xvdb = BlockDeviceType()
            xvdb.ephemeral_name = 'ephemeral0'
            bdm['/dev/xvdb'] = xvdb

            if number_ephemeral_disks == 2:
                xvdc = BlockDeviceType()
                xvdc.ephemeral_name = 'ephemeral1'
                bdm['/dev/xvdc'] = xvdc

        if ebs_size:
            for disks in range(0, number_ebs_volumes):
                xvd_n = blockdevicemapping.EBSBlockDeviceType(
                    delete_on_termination=True)
                xvd_n.size = int(ebs_size)  # size in Gigabytes
                if iops:
                    xvd_n.iops = 500
                    xvd_n.volume_type = 'io1'
                else:
                    xvd_n.volume_type = 'gp2'
                last_char = chr(ord('f') + disks)
                bdm['/dev/xvd' + last_char] = xvd_n

        return bdm
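A hedged usage sketch of the builder above (the AMI id, instance type and connection are illustrative, and the boto imports used by the function are assumed to be in place):

    # Illustrative call: two ephemeral disks plus two 100 GB gp2 EBS volumes.
    bdm = build_block_device_map(ephemeral=True,
                                 number_ephemeral_disks=2,
                                 ebs_size=100,
                                 number_ebs_volumes=2)
    # bdm maps /dev/xvdb and /dev/xvdc to ephemeral disks and /dev/xvdf and
    # /dev/xvdg to 100 GB gp2 volumes; it would typically be passed to
    # run_instances(..., block_device_map=bdm) on an assumed EC2 connection.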
Example #2
    def build_block_device_map(ephemeral, number_ephemeral_disks=1, ebs_size=None, iops=None, number_ebs_volumes=1):
        bdm = blockdevicemapping.BlockDeviceMapping()

        if ephemeral:
            # The ephemeral disk
            xvdb = BlockDeviceType()
            xvdb.ephemeral_name = 'ephemeral0'
            bdm['/dev/xvdb'] = xvdb

            if number_ephemeral_disks == 2:
                xvdc = BlockDeviceType()
                xvdc.ephemeral_name = 'ephemeral1'
                bdm['/dev/xvdc'] = xvdc

        if ebs_size:
            for disks in range(0, number_ebs_volumes):
                xvd_n = blockdevicemapping.EBSBlockDeviceType(delete_on_termination=True)
                xvd_n.size = int(ebs_size)  # size in Gigabytes
                if iops:
                    xvd_n.iops = 500
                    xvd_n.volume_type = 'io1'
                else:
                    xvd_n.volume_type = 'gp2'
                last_char = chr(ord('f') + disks)
                bdm['/dev/xvd' + last_char] = xvd_n

        return bdm
Example #3
def spotrequest(ec2, nodetype, count, testname, testdate, threads, userdata = None):
    global selected_availability_zone

    maxprice = "0.40"
    if nodetype == 'client':
        instancetype = 'c3.xlarge'
    else:
        # For data nodes
        instancetype = 'c3.4xlarge'

    # Allow an explicit selection if needed...
    #selected_availability_zone = "us-east-1e"
    if not selected_availability_zone:
        selected_availability_zone = random.choice([
            'us-east-1a',
            #'us-east-1b',
            'us-east-1d',
            'us-east-1e',
        ])
    availability_zone = selected_availability_zone

    if userdata == None:
        userdata = """#!/bin/bash
echo {0} > /etc/node_testname
echo {1} > /etc/node_testdate
echo {2} > /etc/node_threads
echo {3} > /etc/node_role
#echo 10.136.71.116 > /etc/node_headnode
echo 400 > /etc/node_swap             # MB of swap created
echo 1 > /etc/node_mongo_uselocal     # Use local mongos shard server on each client
""".format(testname, testdate, threads, nodetype)

    # For some tests we may not need any nodes of this type
    if count == 0:
        return []

    # Default AMI
    ami = 'ami-XXXXXXXX' # Current versions

    # Specify ephemeral block devices...

    bdmap = BlockDeviceMapping()
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    bdmap['/dev/sdb'] = sdb
    sdc = BlockDeviceType()
    sdc.ephemeral_name = 'ephemeral1'
    bdmap['/dev/sdc'] = sdc
    #sdd = BlockDeviceType()
    #sdd.ephemeral_name = 'ephemeral2'
    #bdmap['/dev/sdd'] = sdd
    #sde = BlockDeviceType()
    #sde.ephemeral_name = 'ephemeral3'
    #bdmap['/dev/sde'] = sde

    return ec2.request_spot_instances(maxprice, ami, count=count,
                                      launch_group=testdate,
                                      availability_zone_group=testdate,
                                      security_groups=['epstatic'],
                                      user_data=userdata,
                                      instance_type=instancetype,
                                      block_device_map=bdmap)
Example #4
 def get_block_device_map(bdmapping_json=None):
     """Parse block_device_mapping JSON and return a configured BlockDeviceMapping object
     Mapping JSON structure...
         {"/dev/sda":
             {"snapshot_id": "snap-23E93E09", "volume_type": null, "delete_on_termination": true, "size": 1}  }
     """
     if bdmapping_json:
         mapping = json.loads(bdmapping_json)
         if mapping:
             bdm = BlockDeviceMapping()
             for key, val in mapping.items():
                 device = BlockDeviceType()
                 if val.get('virtual_name') is not None and val.get(
                         'virtual_name').startswith('ephemeral'):
                     device.ephemeral_name = val.get('virtual_name')
                 else:
                     device.volume_type = 'standard'
                     device.snapshot_id = val.get('snapshot_id') or None
                     device.size = val.get('size')
                     device.delete_on_termination = val.get(
                         'delete_on_termination', False)
                 bdm[key] = device
             return bdm
         return None
     return None
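A hedged usage sketch of the parser above, using the JSON structure from its docstring (json is assumed to be imported):

# Illustrative call built from the docstring's mapping structure.
bdmapping_json = ('{"/dev/sda": {"snapshot_id": "snap-23E93E09", "volume_type": null,'
                  ' "delete_on_termination": true, "size": 1}}')
bdm = get_block_device_map(bdmapping_json)
# bdm['/dev/sda'] is a BlockDeviceType with volume_type='standard',
# snapshot_id='snap-23E93E09', size=1 and delete_on_termination=True.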
def get_block_device_mapping(instance_type):
    '''
    Given an instance type, return the block device mapping for m3 instances.
    We need to do this explicitly because AWS tends to ignore the
    AMI-specified block device mapping.

    Later on, we could extend this functionality to allow customers to specify
    attaching external EBS volumes.
    '''
    block_map = BlockDeviceMapping()

    # AWS ignores the AMI-specified block device mapping for M3
    # See:
    # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html
    ### For M3 instances, you must specify instance store volumes in the block
    ### device mapping for the instance when you launch it. When you launch
    ### an M3 instance, instance store volumes specified in the block device
    ### mapping for the AMI may be ignored if they are not specified as part of
    ### the instance block device mapping.
    if instance_type.startswith('m3.'):
        for i in range(get_num_disks(instance_type)):
            dev = BlockDeviceType()
            dev.ephemeral_name = 'ephemeral%d' % i
            # The first ephemeral drive is /dev/sdb.
            name = '/dev/sd' + string.ascii_letters[i + 1]
            block_map[name] = dev
    return block_map
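The get_num_disks helper used above is not part of this listing; a minimal sketch, assuming it simply maps m3 instance types to their instance-store volume counts (per the EC2 documentation for the m3 family), might look like this:

# Assumed sketch of get_num_disks; the real helper is not shown in this listing.
M3_INSTANCE_STORE_COUNTS = {
    'm3.medium': 1,
    'm3.large': 1,
    'm3.xlarge': 2,
    'm3.2xlarge': 2,
}

def get_num_disks(instance_type):
    return M3_INSTANCE_STORE_COUNTS.get(instance_type, 0)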
def create_block_device_mapping(ami, device_map):
    bdm = BlockDeviceMapping()
    for device, device_info in device_map.items():
        if ami.root_device_type == "instance-store" and \
                not device_info.get("ephemeral_name"):
            # EBS is not supported by S3-backed AMIs at request time
            # EBS volumes can be attached when an instance is running
            continue
        bd = BlockDeviceType()
        if device_info.get('size'):
            bd.size = device_info['size']
        if ami.root_device_name == device:
            ami_size = ami.block_device_mapping[device].size
            if ami.virtualization_type == "hvm":
                # Overwrite root device size for HVM instances, since they
                # cannot be resized online
                bd.size = ami_size
            elif device_info.get('size'):
                # make sure that size is enough for this AMI
                assert ami_size <= device_info['size'], \
                    "Instance root device size cannot be smaller than AMI " \
                    "root device"
        if device_info.get("delete_on_termination") is not False:
            bd.delete_on_termination = True
        if device_info.get("ephemeral_name"):
            bd.ephemeral_name = device_info["ephemeral_name"]
        if device_info.get("volume_type"):
            bd.volume_type = device_info["volume_type"]
            if device_info["volume_type"] == "io1" \
                    and device_info.get("iops"):
                bd.iops = device_info["iops"]

        bdm[device] = bd
    return bdm
Example #7
def create_block_device_mapping(ami, device_map):
    bdm = BlockDeviceMapping()
    for device, device_info in device_map.items():
        if ami.root_device_type == "instance-store" and \
                not device_info.get("ephemeral_name"):
            # EBS is not supported by S3-backed AMIs at request time
            # EBS volumes can be attached when an instance is running
            continue
        bd = BlockDeviceType()
        if device_info.get('size'):
            bd.size = device_info['size']
        if ami.root_device_name == device:
            ami_size = ami.block_device_mapping[device].size
            if ami.virtualization_type == "hvm":
                # Overwrite root device size for HVM instances, since they
                # cannot be resized online
                bd.size = ami_size
            elif device_info.get('size'):
                # make sure that size is enough for this AMI
                assert ami_size <= device_info['size'], \
                    "Instance root device size cannot be smaller than AMI " \
                    "root device"
        if device_info.get("delete_on_termination") is not False:
            bd.delete_on_termination = True
        if device_info.get("ephemeral_name"):
            bd.ephemeral_name = device_info["ephemeral_name"]
        if device_info.get("volume_type"):
            bd.volume_type = device_info["volume_type"]
            if device_info["volume_type"] == "io1" \
                    and device_info.get("iops"):
                bd.iops = device_info["iops"]

        bdm[device] = bd
    return bdm
Example #8
def launch_ondemand_request(conn, request, tenant, job):
    try:

        mapping = BlockDeviceMapping()
        sda1 = BlockDeviceType()
        eph0 = BlockDeviceType()
        eph1 = BlockDeviceType()
        eph2 = BlockDeviceType()
        eph3 = BlockDeviceType()
        sda1.size = 10
        eph0.ephemeral_name = 'ephemeral0'
        eph1.ephemeral_name = 'ephemeral1'
        eph2.ephemeral_name = 'ephemeral2'
        eph3.ephemeral_name = 'ephemeral3'
        mapping['/dev/sda1'] = sda1
        mapping['/dev/sdb'] = eph0
        mapping['/dev/sdc'] = eph1
        mapping['/dev/sdd'] = eph2
        mapping['/dev/sde'] = eph3

        # issue a run_instances command for this request
        res = conn.run_instances(min_count=request.count,
                                 max_count=request.count,
                                 key_name=tenant.key_pair,
                                 image_id=request.ami,
                                 security_group_ids=[tenant.security_group],
                                 user_data=customise_cloudinit(tenant, job),
                                 instance_type=request.instance_type,
                                 subnet_id=tenant.subnet,
                                 block_device_map=mapping)
        my_req_ids = [req.id for req in res.instances]
        # address = ""
        for req in my_req_ids:
            # tag each request
            tag_requests(req, tenant.name, conn)
            # update the database to include the new request
            ProvisionerConfig().dbconn.execute(
                ("insert into instance_request (tenant, instance_type, " +
                 "price, job_runner_id, request_type, request_id, " +
                 "subnet) values ('%s', '%s', %s, %s, '%s', '%s', %s)") %
                (tenant.db_id, request.instance.db_id,
                 request.instance.ondemand, job.id, "ondemand", req,
                 tenant.subnet_id))
            # ProvisionerConfig().dbconn.commit()
            return
    except boto.exception.EC2ResponseError:
        logger.exception("There was an error communicating with EC2.")
Example #9
def _parse_block_device_mappings(user_input):
    """
    Parse block device mappings per AWS CLI tools syntax (modified to add IOPS)

    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html

    Syntax:
    /dev/xvd[a-z]=[snapshot-id|ephemeral]:[size in GB]:[Delete on Term]:[IOPS]
    - Leave inapplicable fields blank
    - Delete on Termination defaults to True
    - IOPS limits are not validated
    - EBS sizing is not validated

    Mount an Ephemeral Drive:
    /dev/xvdb1=ephemeral0

    Mount multiple Ephemeral Drives:
    /dev/xvdb1=ephemeral0,/dev/xvdb2=ephemeral1

    Mount a Snapshot:
    /dev/xvdp=snap-1234abcd

    Mount a Snapshot to a 100GB drive:
    /dev/xvdp=snap-1234abcd:100

    Mount a Snapshot to a 100GB drive and do not delete on termination:
    /dev/xvdp=snap-1234abcd:100:false

    Mount a Fresh 100GB EBS device
    /dev/xvdp=:100

    Mount a Fresh 100GB EBS Device and do not delete on termination:
    /dev/xvdp=:100:false

    Mount a Fresh 100GB EBS Device with 1000 IOPS
    /dev/xvdp=:100::1000
    """
    block_device_map = BlockDeviceMapping()
    mappings = user_input.split(",")
    for mapping in mappings:
        block_type = BlockDeviceType()
        mount_point, drive_type, size, delete, iops = _parse_drive_mapping(
            mapping)
        if 'ephemeral' in drive_type:
            block_type.ephemeral_name = drive_type
        elif 'snap' in drive_type:
            block_type.snapshot_id = drive_type
            block_type.volume_type = "standard"
        else:
            block_type.volume_type = "standard"
        block_type.size = size
        block_type.delete_on_termination = delete

        if iops:
            block_type.iops = iops
            block_type.volume_type = "io1"

        block_device_map[mount_point] = block_type
    return block_device_map
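The _parse_drive_mapping helper called above is not included in this listing; a minimal sketch that follows the syntax documented in the docstring (device=[snapshot-id|ephemeral]:[size]:[delete]:[iops]) could look like this:

# Assumed sketch of _parse_drive_mapping; the original helper is not shown.
def _parse_drive_mapping(mapping):
    mount_point, _, spec = mapping.partition('=')
    fields = spec.split(':')
    fields += [''] * (4 - len(fields))      # pad any omitted fields
    drive_type, size, delete, iops = fields[:4]
    size = int(size) if size else None
    delete = delete.lower() != 'false'      # Delete on Termination defaults to True
    iops = int(iops) if iops else None
    return mount_point, drive_type, size, delete, iops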
def test_create_launch_configuration_with_block_device_mappings():
    block_device_mapping = BlockDeviceMapping()

    ephemeral_drive = BlockDeviceType()
    ephemeral_drive.ephemeral_name = 'ephemeral0'
    block_device_mapping['/dev/xvdb'] = ephemeral_drive

    snapshot_drive = BlockDeviceType()
    snapshot_drive.snapshot_id = "snap-1234abcd"
    snapshot_drive.volume_type = "standard"
    block_device_mapping['/dev/xvdp'] = snapshot_drive

    ebs_drive = BlockDeviceType()
    ebs_drive.volume_type = "io1"
    ebs_drive.size = 100
    ebs_drive.iops = 1000
    ebs_drive.delete_on_termination = False
    block_device_mapping['/dev/xvdh'] = ebs_drive

    conn = boto.connect_autoscale(use_block_device_types=True)
    config = LaunchConfiguration(
        name='tester',
        image_id='ami-abcd1234',
        instance_type='m1.small',
        key_name='the_keys',
        security_groups=["default", "default2"],
        user_data="This is some user_data",
        instance_monitoring=True,
        instance_profile_name='arn:aws:iam::123456789012:instance-profile/testing',
        spot_price=0.1,
        block_device_mappings=[block_device_mapping]
    )
    conn.create_launch_configuration(config)

    launch_config = conn.get_all_launch_configurations()[0]
    launch_config.name.should.equal('tester')
    launch_config.image_id.should.equal('ami-abcd1234')
    launch_config.instance_type.should.equal('m1.small')
    launch_config.key_name.should.equal('the_keys')
    set(launch_config.security_groups).should.equal(set(['default', 'default2']))
    launch_config.user_data.should.equal("This is some user_data")
    launch_config.instance_monitoring.enabled.should.equal('true')
    launch_config.instance_profile_name.should.equal('arn:aws:iam::123456789012:instance-profile/testing')
    launch_config.spot_price.should.equal(0.1)
    len(launch_config.block_device_mappings).should.equal(3)

    returned_mapping = launch_config.block_device_mappings

    set(returned_mapping.keys()).should.equal(set(['/dev/xvdb', '/dev/xvdp', '/dev/xvdh']))

    returned_mapping['/dev/xvdh'].iops.should.equal(1000)
    returned_mapping['/dev/xvdh'].size.should.equal(100)
    returned_mapping['/dev/xvdh'].volume_type.should.equal("io1")
    returned_mapping['/dev/xvdh'].delete_on_termination.should.be.false

    returned_mapping['/dev/xvdp'].snapshot_id.should.equal("snap-1234abcd")
    returned_mapping['/dev/xvdp'].volume_type.should.equal("standard")

    returned_mapping['/dev/xvdb'].ephemeral_name.should.equal('ephemeral0')
Example #11
def _parse_block_device_mappings(user_input):
    """
    Parse block device mappings per AWS CLI tools syntax (modified to add IOPS)

    http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html

    Syntax:
    /dev/xvd[a-z]=[snapshot-id|ephemeral]:[size in GB]:[Delete on Term]:[IOPS]
    - Leave inapplicable fields blank
    - Delete on Termination defaults to True
    - IOPS limits are not validated
    - EBS sizing is not validated

    Mount an Ephemeral Drive:
    /dev/xvdb1=ephemeral0

    Mount multiple Ephemeral Drives:
    /dev/xvdb1=ephemeral0,/dev/xvdb2=ephemeral1

    Mount a Snapshot:
    /dev/xvdp=snap-1234abcd

    Mount a Snapshot to a 100GB drive:
    /dev/xvdp=snap-1234abcd:100

    Mount a Snapshot to a 100GB drive and do not delete on termination:
    /dev/xvdp=snap-1234abcd:100:false

    Mount a Fresh 100GB EBS device
    /dev/xvdp=:100

    Mount a Fresh 100GB EBS Device and do not delete on termination:
    /dev/xvdp=:100:false

    Mount a Fresh 100GB EBS Device with 1000 IOPS
    /dev/xvdp=:100::1000
    """
    block_device_map = BlockDeviceMapping()
    mappings = user_input.split(",")
    for mapping in mappings:
        block_type = BlockDeviceType()
        mount_point, drive_type, size, delete, iops = _parse_drive_mapping(mapping)
        if 'ephemeral' in drive_type:
            block_type.ephemeral_name = drive_type
        elif 'snap' in drive_type:
            block_type.snapshot_id = drive_type
            block_type.volume_type = "standard"
        else:
            block_type.volume_type = "standard"
        block_type.size = size
        block_type.delete_on_termination = delete

        if iops:
            block_type.iops = iops
            block_type.volume_type = "io1"

        block_device_map[mount_point] = block_type
    return block_device_map
def create_mapping(config):
    if 'mapping' not in config:
        return None
    mapping = BlockDeviceMapping()
    for ephemeral_name, device_path in config['mapping'].iteritems():
        ephemeral = BlockDeviceType()
        ephemeral.ephemeral_name = ephemeral_name
        mapping[device_path] = ephemeral
    return mapping
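A hedged example of the config structure create_mapping expects, inferred from the loop above (device paths and names are illustrative):

# Illustrative config for create_mapping; keys are ephemeral names,
# values are device paths.
config = {
    'mapping': {
        'ephemeral0': '/dev/sdb',
        'ephemeral1': '/dev/sdc',
    }
}
mapping = create_mapping(config)
# mapping['/dev/sdb'].ephemeral_name == 'ephemeral0', and so on.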
Example #13
def launch(name, ami='ami-3d4ff254', instance_type='t1.micro', key_name='amazon2', 
           zone='us-east-1d', security_group='quicklaunch-1', user='******', job=None):
    '''Launch a single instance of the provided ami '''
    conn = EC2Connection()
    # Declare the block device mapping for ephemeral disks
    mapping = BlockDeviceMapping()
    eph0 = BlockDeviceType()
    eph1 = BlockDeviceType()
    eph0.ephemeral_name = 'ephemeral0'
    eph1.ephemeral_name = 'ephemeral1'
    mapping['/dev/sdb'] = eph0
    mapping['/dev/sdc'] = eph1
    # Now, ask for a reservation
    reservation = conn.run_instances(ami, instance_type=instance_type, 
                                     key_name=key_name, placement=zone, 
                                     block_device_map=mapping, security_groups=[security_group])
    # And assume that the instance we're talking about is the first in the list
    # This is not always a good assumption, and will likely depend on the specifics
    # of your launching situation. For launching an isolated instance while no
    # other actions are taking place, this is sufficient.
    instance = reservation.instances[0]
    print('Waiting for instance to start...')
    # Check up on its status every so often
    status = instance.update()
    while status == 'pending':
        time.sleep(5)
        status = instance.update()
    if status == 'running':
        print('New instance "' + instance.id + '" accessible at ' + instance.public_dns_name)
        # Name the instance
        conn.create_tags([instance.id], {'Name': name})

        n = Node(name, instance.id, instance.image_id, instance.key_name, instance.placement,
                instance.instance_type, instance.dns_name, instance.private_dns_name,
                instance.ip_address, instance.private_ip_address, user, job)
    
        pprint.pprint(n.to_dict())
        addNode(n)
    
    else:
        print('Instance status: ' + status)
        return
def _create_device_map(ephemeral_disk_count):
  """Creates a block device out of the ephemeral disks on this instance."""
  device_map = BlockDeviceMapping()
  device_paths = _get_device_paths(ephemeral_disk_count)

  for index, device_path in enumerate(device_paths):
    device = BlockDeviceType()
    device.ephemeral_name = "ephemeral{}".format(index)
    device_map[device_path] = device

  return device_map
Example #15
def block_mappings(flavor):
  mapping = BlockDeviceMapping()
  if flavor == "hs1.8xlarge":
    for i in range(0, 24):
      eph = BlockDeviceType()
      eph.ephemeral_name = "ephemeral%d" % i
      device = "/dev/sd%c1" % chr(ord('b') + i)
      mapping[device] = eph
  elif flavor == "hi1.4xlarge":
    for i in range(0, 2):
      eph = BlockDeviceType()
      eph.ephemeral_name = "ephemeral%d" % i
      device = "/dev/sd%c1" % chr(ord('b') + i)
      mapping[device] = eph
  elif flavor == "m1.xlarge":
    for i in range(0, 4):
      eph = BlockDeviceType()
      eph.ephemeral_name = "ephemeral%d" % i
      device = "/dev/sd%c1" % chr(ord('b') + i)
      mapping[device] = eph
  return mapping
Example #16
    def __call__(self, config, sectionname):
        from boto.ec2.blockdevicemapping import BlockDeviceMapping
        from boto.ec2.blockdevicemapping import BlockDeviceType

        value = BaseMassager.__call__(self, config, sectionname)
        device_map = BlockDeviceMapping()
        for mapping in value.split():
            device_path, ephemeral_name = mapping.split(':')
            device = BlockDeviceType()
            device.ephemeral_name = ephemeral_name
            device_map[device_path] = device
        return device_map
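A hedged example of the raw option value this massager would receive (whitespace-separated device_path:ephemeral_name pairs; the value itself is illustrative):

    # Illustrative value parsed by the massager above.
    value = "/dev/sdb:ephemeral0 /dev/sdc:ephemeral1"
    # -> device_map['/dev/sdb'].ephemeral_name == 'ephemeral0'
    # -> device_map['/dev/sdc'].ephemeral_name == 'ephemeral1'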
Example #17
    def __call__(self, config, sectionname):
        from boto.ec2.blockdevicemapping import BlockDeviceMapping
        from boto.ec2.blockdevicemapping import BlockDeviceType

        value = BaseMassager.__call__(self, config, sectionname)
        device_map = BlockDeviceMapping()
        for mapping in value.split():
            device_path, ephemeral_name = mapping.split(':')
            device = BlockDeviceType()
            device.ephemeral_name = ephemeral_name
            device_map[device_path] = device
        return device_map
Example #18
def register(snapshot_id,
             region,
             arch,
             size=None,
             name=None,
             desc=None,
             pvm=False):
    conn = utils.connect(region)

    if None in (name, size):
        log.debug('getting snapshot - %s', snapshot_id)
        snapshot = conn.get_all_snapshots(snapshot_ids=[snapshot_id])[0]
        size = size if size else snapshot.volume_size
        name = name if name else snapshot.description

    virt = 'hvm'
    kernel_id = None
    device_base = '/dev/xvd'
    ec2_arch = "x86_64" if arch == "amd64" else arch

    if pvm:
        kernel_id = utils.get_kernel(region, arch)
        virt = 'paravirtual'
        device_base = '/dev/sd'
        name += '-pvm'

    log.debug('creating block_device_map')
    block_device_map = BlockDeviceMapping()

    rootfs = BlockDeviceType()
    rootfs.delete_on_termination = True
    rootfs.size = size
    rootfs.snapshot_id = snapshot_id
    rootfs_device_name = device_base + 'a'
    block_device_map[rootfs_device_name] = rootfs

    ephemeral = BlockDeviceType()
    ephemeral.ephemeral_name = 'ephemeral0'
    ephemeral_device_name = device_base + 'b'
    block_device_map[ephemeral_device_name] = ephemeral

    log.debug('registering image - %s', name)
    ami_id = conn.register_image(name=name,
                                 description=desc,
                                 kernel_id=kernel_id,
                                 architecture=ec2_arch,
                                 root_device_name=rootfs_device_name,
                                 block_device_map=block_device_map,
                                 virtualization_type=virt)

    log.info('registered image - %s %s %s', ami_id, name, region)
    return ami_id, name
Example #19
 def launch_instance(self):
     if not self.verify_settings():
         return
     block_map = BlockDeviceMapping()
     root_device = self.config["ec2_root_device"]
     block_map[root_device] = EBSBlockDeviceType()
     if self.config["ec2_size"]:
         block_map[root_device].size = self.config["ec2_size"]
     block_map[root_device].delete_on_termination = True
     for num, device_location in enumerate(self.config["ec2_ephemeral_devices"]):
         device = BlockDeviceType()
         device.ephemeral_name = "ephemeral%d" % num
         block_map[device_location] = device
     reservation = self.conn.run_instances(
         self.config["ec2_ami_id"],
         key_name=self.config["ec2_key_name"],
         security_groups=self.config["ec2_security_groups"] or [self.config["ec2_security_group"]],
         instance_type=self.config["ec2_instance_type"],
         placement=self.config["ec2_zone"],
         monitoring_enabled=self.config["ec2_monitoring_enabled"],
         block_device_map=block_map,
         user_data=self.user_data,
     )
     self.instance = reservation.instances[0]
     secs = RUN_INSTANCE_TIMEOUT
     rest_interval = 5
     while secs and not self.instance.state == "running":
         time.sleep(rest_interval)
         secs = secs - rest_interval
         try:
             self.instance.update()
         except boto.exception.EC2ResponseError:
             pass
     if secs <= 0:
         errmsg = "run instance %s failed after %d seconds" % (self.instance.id, RUN_INSTANCE_TIMEOUT)
         LOG.error(errmsg)
     else:
         if self.config["hostname"]:
             self.assign_name_tag()
         msg1 = "Started Instance: {0}\n".format(self.instance.id)
         LOG.info(msg1)
         print msg1
         p = int(self.config["ssh_port"])
         port = "-p {0} ".format(p) if p and not p == 22 else ""
         ## change user to 'root' for all non-Ubuntu systems
         user = self.config["sudouser"] if self.config["sudouser"] and self.config["ssh_import"] else "ubuntu"
         # XXX - TODO: replace public dns with fqdn, where appropriate
         msg2 = "To access: ssh {0}{1}@{2}\n" "To terminate: shaker-terminate {3}".format(
             port, user, self.instance.public_dns_name, self.instance.id
         )
         LOG.info(msg2)
         print msg2
Example #20
def demandrequest(ec2, nodetype, testname, testdate, threads, userdata = None):
    global selected_availability_zone

    if nodetype == 'client':
        instancetype = 'c3.xlarge'
    else:
        instancetype = 'i2.xlarge'

    # Allow an explicit selection if needed...
    #selected_availability_zone = 'us-east-1e'
    if not selected_availability_zone:
        selected_availability_zone = random.choice([
            'us-east-1a',
            #'us-east-1b',
            'us-east-1d',
            'us-east-1e',
        ])
    availability_zone = selected_availability_zone

    if userdata == None:
        userdata = """#!/bin/bash
echo {0} > /etc/node_testname
echo {1} > /etc/node_testdate
echo {2} > /etc/node_threads
echo {3} > /etc/node_role
echo 10.136.71.116 > /etc/node_headnode
echo 400 > /etc/node_swap             # MB of swap created
echo 1 > /etc/node_mongo_uselocal     # Use local mongos shard server on each client
""".format(testname, testdate, threads, nodetype)

    # Default AMI
    ami = 'ami-XXXXXXXX' # Current versions

    # Specify ephemeral block devices...

    bdmap = BlockDeviceMapping()
    sdb = BlockDeviceType()
    sdb.ephemeral_name = 'ephemeral0'
    bdmap['/dev/sdb'] = sdb
    #sdc = BlockDeviceType()
    #sdc.ephemeral_name = 'ephemeral1'
    #bdmap['/dev/sdc'] = sdc
    #sdd = BlockDeviceType()
    #sdd.ephemeral_name = 'ephemeral2'
    #bdmap['/dev/sdd'] = sdd
    #sde = BlockDeviceType()
    #sde.ephemeral_name = 'ephemeral3'
    #bdmap['/dev/sde'] = sde

    return ec2.run_instances(ami, placement=availability_zone,
                             security_groups=['epstatic'], user_data=userdata,
                             instance_type=instancetype, block_device_map=bdmap)
Example #21
    def _process_block_device_mappings(self, launch_config, zone=None):
        """
        Processes block device mapping information
        and returns a Boto BlockDeviceMapping object. If new volumes
        are requested (source is None and destination is VOLUME), they will be
        created and the relevant volume ids included in the mapping.
        """
        bdm = BlockDeviceMapping()
        # Assign letters from f onwards
        # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
        next_letter = iter(list(string.ascii_lowercase[6:]))
        # assign ephemeral devices from 0 onwards
        ephemeral_counter = 0
        for device in launch_config.block_devices:
            bd_type = BlockDeviceType()

            if device.is_volume:
                if device.is_root:
                    bdm['/dev/sda1'] = bd_type
                else:
                    bdm['sd' + next(next_letter)] = bd_type

                if isinstance(device.source, Snapshot):
                    bd_type.snapshot_id = device.source.id
                elif isinstance(device.source, Volume):
                    bd_type.volume_id = device.source.id
                elif isinstance(device.source, MachineImage):
                    # Not supported
                    pass
                else:
                    # source is None, but destination is volume, therefore
                    # create a blank volume. If the Zone is None, this
                    # could fail since the volume and instance may be created
                    # in two different zones.
                    if not zone:
                        raise InvalidConfigurationException(
                            "A zone must be specified when launching with a"
                            " new blank volume block device mapping.")
                    new_vol = self.provider.block_store.volumes.create(
                        '',
                        device.size,
                        zone)
                    bd_type.volume_id = new_vol.id
                bd_type.delete_on_termination = device.delete_on_terminate
                if device.size:
                    bd_type.size = device.size
            else:  # device is ephemeral
                bd_type.ephemeral_name = 'ephemeral%s' % ephemeral_counter
                ephemeral_counter += 1
                bdm['sd' + next(next_letter)] = bd_type

        return bdm
Example #22
 def _get_bmap(self, params):
     bmap = BlockDeviceMapping()
     for device in params['bmap']:
         if not 'name' in device.keys():
             self.logger.debug('bad device ' + str(device))
             continue
         dev = BlockDeviceType()
         if 'size' in device.keys():
             dev.size = device['size']
         if 'delete_on_termination' in device.keys():
             dev.delete_on_termination = device['delete_on_termination']
         if 'ephemeral_name' in device.keys():
             dev.ephemeral_name = device['ephemeral_name']
         bmap[device['name']] = dev
     return bmap
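A hedged example of the params structure _get_bmap iterates over (field names are taken from the code above; the values are made up):

# Illustrative params for _get_bmap.
params = {
    'bmap': [
        {'name': '/dev/sda1', 'size': 20, 'delete_on_termination': True},
        {'name': '/dev/sdb', 'ephemeral_name': 'ephemeral0'},
    ]
}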
Example #23
 def _parse_block_device_mappings(self):
     block_device_map = BlockDeviceMapping()
     for mapping in self.block_device_mapping_dict:
         block_type = BlockDeviceType()
         mount_point = mapping.get('device_name')
         if 'ephemeral' in mapping.get('virtual_name', ''):
             block_type.ephemeral_name = mapping.get('virtual_name')
         else:
             block_type.volume_type = mapping.get('ebs._volume_type')
             block_type.snapshot_id = mapping.get('ebs._snapshot_id')
             block_type.delete_on_termination = mapping.get('ebs._delete_on_termination')
             block_type.size = mapping.get('ebs._volume_size')
             block_type.iops = mapping.get('ebs._iops')
         block_device_map[mount_point] = block_type
     return block_device_map
Example #24
 def _parse_block_device_mappings(self):
     block_device_map = BlockDeviceMapping()
     for mapping in self.block_device_mapping_dict:
         block_type = BlockDeviceType()
         mount_point = mapping.get('device_name')
         if 'ephemeral' in mapping.get('virtual_name', ''):
             block_type.ephemeral_name = mapping.get('virtual_name')
         else:
             block_type.volume_type = mapping.get('ebs._volume_type')
             block_type.snapshot_id = mapping.get('ebs._snapshot_id')
             block_type.delete_on_termination = mapping.get('ebs._delete_on_termination')
             block_type.size = mapping.get('ebs._volume_size')
             block_type.iops = mapping.get('ebs._iops')
         block_device_map[mount_point] = block_type
     return block_device_map
Example #25
 def _get_bmap(self, params):
     bmap = BlockDeviceMapping()
     for device in params['bmap']:
         if not 'name' in device.keys():
             self.logger.debug('bad device ' + str(device))
             continue
         dev = BlockDeviceType()
         if 'size' in device.keys():
             dev.size = device['size']
         if 'delete_on_termination' in device.keys():
             dev.delete_on_termination = device['delete_on_termination']
         if 'ephemeral_name' in device.keys():
             dev.ephemeral_name = device['ephemeral_name']
         bmap[device['name']] = dev
     return bmap
Example #26
    def _process_block_device_mappings(self, launch_config, zone=None):
        """
        Processes block device mapping information
        and returns a Boto BlockDeviceMapping object. If new volumes
        are requested (source is None and destination is VOLUME), they will be
        created and the relevant volume ids included in the mapping.
        """
        bdm = BlockDeviceMapping()
        # Assign letters from f onwards
        # http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/device_naming.html
        next_letter = iter(list(string.ascii_lowercase[6:]))
        # assign ephemeral devices from 0 onwards
        ephemeral_counter = 0
        for device in launch_config.block_devices:
            bd_type = BlockDeviceType()

            if device.is_volume:
                if device.is_root:
                    bdm['/dev/sda1'] = bd_type
                else:
                    bdm['sd' + next(next_letter)] = bd_type

                if isinstance(device.source, Snapshot):
                    bd_type.snapshot_id = device.source.id
                elif isinstance(device.source, Volume):
                    bd_type.volume_id = device.source.id
                elif isinstance(device.source, MachineImage):
                    # Not supported
                    pass
                else:
                    # source is None, but destination is volume, therefore
                    # create a blank volume. If the Zone is None, this
                    # could fail since the volume and instance may be created
                    # in two different zones.
                    if not zone:
                        raise InvalidConfigurationException(
                            "A zone must be specified when launching with a"
                            " new blank volume block device mapping.")
                    new_vol = self.provider.block_store.volumes.create(
                        '', device.size, zone)
                    bd_type.volume_id = new_vol.id
                bd_type.delete_on_termination = device.delete_on_terminate
                if device.size:
                    bd_type.size = device.size
            else:  # device is ephemeral
                bd_type.ephemeral_name = 'ephemeral%s' % ephemeral_counter
                ephemeral_counter += 1
                bdm['sd' + next(next_letter)] = bd_type

        return bdm
Example #27
def start_instances(ec2cxn,
                    instance_count,
                    image_id,
                    use_ephemeral=False,
                    instance_type="c1.xlarge"):

    print "Attempting to start ", instance_count, " instances of image: ", image_id

    if use_ephemeral:
        dev_map = BlockDeviceMapping()
        sdb1 = BlockDeviceType()
        sdb1.ephemeral_name = 'ephemeral0'
        dev_map['/dev/sdb1'] = sdb1
        reservation = ec2cxn.run_instances(image_id,
                                           min_count=1,
                                           max_count=instance_count,
                                           block_device_map=dev_map,
                                           security_groups=SECURITY_GROUPS,
                                           key_name="capk",
                                           instance_type=instance_type)
    else:
        reservation = ec2cxn.run_instances(image_id,
                                           min_count=1,
                                           max_count=instance_count,
                                           security_groups=SECURITY_GROUPS,
                                           key_name="capk",
                                           instance_type=instance_type)

    instances = reservation.instances

    # never leave instances running at the end of the script
    def kill_instances():
        for instance in instances:
            instance.update()
            if instance.state != 'terminated':
                print "Killing ", instance
                instance.terminate()

    atexit.register(kill_instances)

    print "Started ", instance_count, " instances"
    for i in instances:
        print "  =>", i.id
    if len(instances) != instance_count:
        print "Expected %d instances, got %d" % (instance_count,
                                                 len(instances))

    return instances
Example #28
 def startCluster(self, argv):
     if len(argv) != 0:
         print "ec2 startCluster"
         sys.exit(-1)
     regions = boto.ec2.regions()
     regionInfo = '\n'.join(str(region).split(':')[1] for region in regions)
     regionName = raw_input("select region:\n%s\n>>"%regionInfo)
     region = boto.ec2.get_region(regionName)
     conn = region.connect()
     print "region connected successfully"
     images = conn.get_all_images(owners='self')
     imageInfo = '\n'.join(
         str(image).split(':')[1] + ":" + image.name for image in images)
     imageId = raw_input("enter imageId:\nself-created images:\n%s\n>>"%imageInfo)
     instanceTypeInfo = ("m1.small, " "m1.large, " "m1.xlarge\n"
                         "c1.medium, " "c1.xlarge\n"
                         "m2.xlarge, " "m2.2xlarge, " "m2.4xlarge\n"
                         "cc1.4xlarge, " "t1.micro\n")
     instanceType = raw_input("enter instanceType:\n%s\n>>"%instanceTypeInfo)
     availZone = raw_input("enter placement[a,b,c]:\n>>")
     availZone = regionName + availZone
     diskSize = int(raw_input("enter disk size[G]:\n>>"))
     rootDev = BlockDeviceType()
     rootDev.name = 'root'
     rootDev.size = diskSize
     rootDev.delete_on_termination = True
     instStorage = bool(raw_input("mount inst storage?\n>>"))
     mapping = BlockDeviceMapping()
     mapping['/dev/sda1'] = rootDev
     if instStorage:
         eph0 = BlockDeviceType()
         eph0.ephemeral_name = 'ephemeral0'
         mapping['/dev/sdb'] = eph0
     groups = conn.get_all_security_groups()
     groupInfo = '\n'.join(str(group).split(':')[1] for group in groups)
     group = raw_input("enter securityGroup:\n%s\n>>"%groupInfo)
     keys = conn.get_all_key_pairs()
     if len(keys) == 1:
         key = keys[0].name
         print 'using default key: ' + key
     else:
         keyInfo = '\n'.join(str(key).split(':')[1] for key in keys)
         key = raw_input("enter key name:\n%s\n>>"%keyInfo)
     numNodes = int(raw_input("number of nodes:\n>>"))
     conn.run_instances(
         imageId, min_count=numNodes, max_count=numNodes, placement=availZone,
         security_groups = [group], instance_type=instanceType,
         block_device_map=mapping, key_name=key)
Example #29
 def _parse_block_device_mappings(self):
     block_device_map = BlockDeviceMapping()
     for mapping in self.block_device_mapping_dict:
         block_type = BlockDeviceType()
         mount_point = mapping.get("device_name")
         if "ephemeral" in mapping.get("virtual_name", ""):
             block_type.ephemeral_name = mapping.get("virtual_name")
         else:
             block_type.volume_type = mapping.get("ebs._volume_type")
             block_type.snapshot_id = mapping.get("ebs._snapshot_id")
             block_type.delete_on_termination = mapping.get(
                 "ebs._delete_on_termination")
             block_type.size = mapping.get("ebs._volume_size")
             block_type.iops = mapping.get("ebs._iops")
         block_device_map[mount_point] = block_type
     return block_device_map
Example #30
def register(snapshot_id, region, arch, size=None, name=None, desc=None, pvm=False):
    conn = utils.connect(region)

    if None in (name, size):
        log.debug('getting snapshot - %s', snapshot_id)
        snapshot = conn.get_all_snapshots(snapshot_ids=[snapshot_id])[0]
        size = size if size else snapshot.volume_size
        name = name if name else snapshot.description

    virt = 'hvm'
    kernel_id = None
    device_base = '/dev/xvd'
    ec2_arch = "x86_64" if arch == "amd64" else arch

    if pvm:
        kernel_id = utils.get_kernel(region, arch)
        virt = 'paravirtual'
        device_base = '/dev/sd'
        name += '-pvm'

    log.debug('creating block_device_map')
    block_device_map = BlockDeviceMapping()

    rootfs = BlockDeviceType()
    rootfs.delete_on_termination = True
    rootfs.size = size
    rootfs.snapshot_id = snapshot_id
    rootfs_device_name = device_base + 'a'
    block_device_map[rootfs_device_name] = rootfs

    ephemeral = BlockDeviceType()
    ephemeral.ephemeral_name = 'ephemeral0'
    ephemeral_device_name = device_base + 'b'
    block_device_map[ephemeral_device_name] = ephemeral

    log.debug('registering image - %s', name)
    ami_id = conn.register_image(
        name=name,
        description=desc,
        kernel_id=kernel_id,
        architecture=ec2_arch,
        root_device_name=rootfs_device_name,
        block_device_map=block_device_map,
        virtualization_type=virt)

    log.info('registered image - %s %s %s', ami_id, name, region)
    return ami_id, name
Example #31
def get_block_device(instance_type, ebs_vol_size):
    block_map = BlockDeviceMapping()

    if ebs_vol_size > 0:
        device = EBSBlockDeviceType()
        device.size = ebs_vol_size
        device.delete_on_termination = True
        block_map["/dev/sdv"] = device

    for i in range(get_num_disks(instance_type)):
        dev = BlockDeviceType()
        dev.ephemeral_name = 'ephemeral%d' % i
        # The first ephemeral drive is /dev/sdb.
        name = '/dev/sd' + string.ascii_letters[i + 1]
        block_map[name] = dev

    return block_map
def get_block_device(instance_type, ebs_vol_size):
    block_map = BlockDeviceMapping()

    if ebs_vol_size > 0:
        device = EBSBlockDeviceType()
        device.size = ebs_vol_size
        device.delete_on_termination = True
        block_map['/dev/sdv'] = device

    for i in range(get_num_disks(instance_type)):
        dev = BlockDeviceType()
        dev.ephemeral_name = 'ephemeral%d' % i
        # The first ephemeral drive is /dev/sdb.
        name = '/dev/sd' + string.ascii_letters[i + 1]
        block_map[name] = dev

    return block_map
def start_instances(ec2cxn, instance_count, image_id, use_ephemeral = False, instance_type="c1.xlarge"):
    
    print "Attempting to start ", instance_count, " instances of image: ", image_id     
    
    if use_ephemeral:
        dev_map = BlockDeviceMapping()
        sdb1 = BlockDeviceType()
        sdb1.ephemeral_name = 'ephemeral0'
        dev_map['/dev/sdb1'] = sdb1
        reservation = ec2cxn.run_instances(
          image_id, min_count=1, 
          max_count=instance_count, 
          block_device_map=dev_map, 
          security_groups=SECURITY_GROUPS, 
          key_name="capk", 
          instance_type=instance_type)
    else:
        reservation = ec2cxn.run_instances(
          image_id, 
           min_count=1, 
           max_count=instance_count, 
           security_groups=SECURITY_GROUPS, 
           key_name="capk", 
           instance_type=instance_type)

    instances = reservation.instances
    # never leave instances running at the end of the script
    def kill_instances():
      for instance in instances:
        instance.update()
        if instance.state != 'terminated':
          print "Killing ", instance 
          instance.terminate()
    atexit.register(kill_instances)

    print "Started ", instance_count, " instances"
    for i in instances:
        print "  =>", i.id
    if len(instances) != instance_count:
      print "Expected %d instances, got %d" % (instance_count, len(instances))
       
    return instances
Example #34
def register(snapshot_id, region, arch, size=None, name=None, desc=None):
    conn = utils.connect(region)

    if None in (name, size):
        log.debug("getting snapshot - %s", snapshot_id)
        snapshot = conn.get_all_snapshots(snapshot_ids=[snapshot_id])[0]
        size = size if size else snapshot.volume_size
        name = name if name else snapshot.description

    ec2_arch = "x86_64" if arch == "amd64" else arch

    log.debug("creating block_device_map")
    block_device_map = BlockDeviceMapping()

    rootfs = BlockDeviceType()
    rootfs.delete_on_termination = True
    rootfs.size = size
    rootfs.snapshot_id = snapshot_id
    rootfs_device_name = "/dev/xvda"
    block_device_map[rootfs_device_name] = rootfs

    ephemeral = BlockDeviceType()
    ephemeral.ephemeral_name = "ephemeral0"
    ephemeral_device_name = "/dev/xvdb"
    block_device_map[ephemeral_device_name] = ephemeral

    log.debug("registering image - %s", name)
    ami_id = conn.register_image(
        name=name,
        description=desc,
        architecture=ec2_arch,
        root_device_name=rootfs_device_name,
        block_device_map=block_device_map,
        virtualization_type="hvm",
    )

    log.info("registered image - %s %s %s", ami_id, name, region)
    return ami_id, name
def register(snapshot_id, region, size=None, arch=None, name=None, desc=None):
    conn = utils.connect(region)

    log.debug('getting snapshot - %s', snapshot_id)
    snapshot = conn.get_all_snapshots(snapshot_ids=[snapshot_id])[0]

    size = size if size else snapshot.volume_size
    name = name if name else snapshot.description
    desc = desc if desc else utils.parse_imagename(name)['url']
    arch = arch if arch else utils.parse_imagename(name)['architecture']

    kernel_id = utils.get_kernel(region, arch)
    arch_ec2 = "x86_64" if arch == "amd64" else arch

    log.debug('creating block_device_map')
    rootfs = BlockDeviceType()
    rootfs.delete_on_termination = True
    rootfs.size = size
    rootfs.snapshot_id = snapshot_id

    ephemeral = BlockDeviceType()
    ephemeral.ephemeral_name = 'ephemeral0'

    block_device_map = BlockDeviceMapping()
    block_device_map['/dev/sda1'] = rootfs
    block_device_map['/dev/sda2'] = ephemeral

    log.debug('registering image - %s', name)
    ami_id = conn.register_image(
        name=name,
        description=desc,
        architecture=arch_ec2,
        kernel_id=kernel_id,
        root_device_name="/dev/sda1",
        block_device_map=block_device_map)

    log.info('registered image - %s %s %s', ami_id, name, region)
    return ami_id
Example #36
 def get_block_device_map(bdmapping_json=None):
     """Parse block_device_mapping JSON and return a configured BlockDeviceMapping object
     Mapping JSON structure...
         {"/dev/sda":
             {"snapshot_id": "snap-23E93E09", "volume_type": null, "delete_on_termination": true, "size": 1}  }
     """
     if bdmapping_json:
         mapping = json.loads(bdmapping_json)
         if mapping:
             bdm = BlockDeviceMapping()
             for key, val in mapping.items():
                 device = BlockDeviceType()
                 if val.get('virtual_name') is not None and val.get('virtual_name').startswith('ephemeral'):
                     device.ephemeral_name = val.get('virtual_name')
                 else:
                     device.volume_type = 'standard'
                     device.snapshot_id = val.get('snapshot_id') or None
                     device.size = val.get('size')
                     device.delete_on_termination = val.get('delete_on_termination', False)
                 bdm[key] = device
             return bdm
         return None
     return None
Example #37
 def parse_block_device_args(self, block_device_maps_args):
     block_device_map = BlockDeviceMapping()
     for block_device_map_arg in block_device_maps_args:
         parts = block_device_map_arg.split('=')
         if len(parts) > 1:
             device_name = parts[0]
             block_dev_type = BlockDeviceType()
             value_parts = parts[1].split(':')
             if value_parts[0].startswith('snap'):
                 block_dev_type.snapshot_id = value_parts[0]
             else:
                 if value_parts[0].startswith('ephemeral'):
                     block_dev_type.ephemeral_name = value_parts[0]
             if len(value_parts) > 1:
                 try:
                     block_dev_type.size = int(value_parts[1])
                 except ValueError:
                     pass
             if len(value_parts) > 2:
                 if value_parts[2] == 'true':
                     block_dev_type.delete_on_termination = True
             block_device_map[device_name] = block_dev_type
     return block_device_map
Example #38
 def parse_block_device_args(self, block_device_maps_args):
     block_device_map = BlockDeviceMapping()
     for block_device_map_arg in block_device_maps_args:
         parts = block_device_map_arg.split('=')
         if len(parts) > 1:
             device_name = parts[0]
             block_dev_type = BlockDeviceType()
             value_parts = parts[1].split(':')
             if value_parts[0].startswith('snap'):
                 block_dev_type.snapshot_id = value_parts[0]
             else:
                 if value_parts[0].startswith('ephemeral'):
                     block_dev_type.ephemeral_name = value_parts[0]
             if len(value_parts) > 1:
                 try:
                     block_dev_type.size = int(value_parts[1])
                 except ValueError:
                     pass
             if len(value_parts) > 2:
                 if value_parts[2] == 'true':
                     block_dev_type.delete_on_termination = True
             block_device_map[device_name] = block_dev_type
     return block_device_map
def register(snapshot_id, region, size=None, arch=None, name=None, desc=None):
    conn = utils.connect(region)

    log.debug('getting snapshot - %s', snapshot_id)
    snapshot = conn.get_all_snapshots(snapshot_ids=[snapshot_id])[0]

    size = size if size else snapshot.volume_size
    name = name if name else snapshot.description
    desc = desc if desc else utils.parse_imagename(name)['url']
    arch = arch if arch else utils.parse_imagename(name)['architecture']

    kernel_id = utils.get_kernel(region, arch)
    arch_ec2 = "x86_64" if arch == "amd64" else arch

    log.debug('creating block_device_map')
    rootfs = BlockDeviceType()
    rootfs.delete_on_termination = True
    rootfs.size = size
    rootfs.snapshot_id = snapshot_id

    ephemeral = BlockDeviceType()
    ephemeral.ephemeral_name = 'ephemeral0'

    block_device_map = BlockDeviceMapping()
    block_device_map['/dev/sda1'] = rootfs
    block_device_map['/dev/sda2'] = ephemeral

    log.debug('registering image - %s', name)
    ami_id = conn.register_image(name=name,
                                 description=desc,
                                 architecture=arch_ec2,
                                 kernel_id=kernel_id,
                                 root_device_name="/dev/sda1",
                                 block_device_map=block_device_map)

    log.info('registered image - %s %s %s', ami_id, name, region)
    return ami_id
 def __update_bdm(self, bdm, bd_spec):
     """Update the BlockDeviceMapping bdm with the block device
     spec bd_spec.
     """
     try:
         dev_name, dev_value = bd_spec.split('=', 1)
     except Exception:
         raise CommandError(
             "Block device spec missing '=' : %s" % (bd_spec,))
     dot = None
     bdt = BlockDeviceType()
     if ':' in dev_value:
         blockdev_origin, dot = dev_value.split(':', 1)
     else:
         blockdev_origin = dev_value
     if blockdev_origin is None:
         raise CommandError("No source specified for %s" % (dev_name,))
     if blockdev_origin.startswith('ephemeral'):
         bdt.ephemeral_name = blockdev_origin
     elif blockdev_origin.startswith('snap-'):
         bdt.snapshot_id = blockdev_origin
     else:
         raise CommandError("Bad source specified for %s: %s" % (dev_name,
                                                     blockdev_origin))
     if dot is not None:
         if dot == 'delete':
             bdt.delete_on_termination = True
         elif dot == 'nodelete':
             bdt.delete_on_termination = False
         else:
             raise CommandError(
                     "Bad delete-on-termination specified for %s: %s" %
                                     (dev_name, dot))
     else:
         bdt.delete_on_termination = False
     dev_path = '/dev/' + dev_name
     bdm[dev_path] = bdt
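A hedged illustration of the block device specs __update_bdm parses (format device=source[:delete|nodelete]; the values are made up):

# Illustrative specs for __update_bdm; each one populates bdm['/dev/' + device].
bd_specs = [
    'sdb=ephemeral0',            # ephemeral disk; delete_on_termination stays False
    'sdf=snap-1234abcd:delete',  # EBS volume from a snapshot, deleted on termination
]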
Example #41
 def handleImages(self, action, clc, callback=None):
     if action == "DescribeImages":
         owner = self.get_argument("Owner", None)
         if not owner:
             owners = None
         else:
             owners = [owner]
         filters = self.get_filter_args()
         return clc.get_all_images(owners, filters, callback)
     elif action == "DescribeImageAttribute":
         imageid = self.get_argument("ImageId")
         attribute = self.get_argument("Attribute")
         return clc.get_image_attribute(imageid, attribute, callback)
     elif action == "ModifyImageAttribute":
         imageid = self.get_argument("ImageId")
         attribute = self.get_argument("Attribute")
         operation = self.get_argument("OperationType")
         users = self.get_argument_list("UserId")
         groups = self.get_argument_list("UserGroup")
         return clc.modify_image_attribute(imageid, attribute, operation, users, groups, callback)
     elif action == "ResetImageAttribute":
         imageid = self.get_argument("ImageId")
         attribute = self.get_argument("Attribute")
         return clc.reset_image_attribute(imageid, attribute, callback)
     elif action == "DeregisterImage":
         image_id = self.get_argument("ImageId")
         return clc.deregister_image(image_id, callback)
     elif action == "RegisterImage":
         image_location = self.get_argument("ImageLocation", None)
         name = self.get_argument("Name")
         description = self.get_argument("Description", None)
         if description != None:
             description = base64.b64decode(description)
         architecture = self.get_argument("Architecture", None)
         kernel_id = self.get_argument("KernelId", None)
         ramdisk_id = self.get_argument("RamdiskId", None)
         root_dev_name = self.get_argument("RootDeviceName", None)
         snapshot_id = self.get_argument("SnapshotId", None)
         # get block device mappings
         bdm = BlockDeviceMapping()
         mapping = self.get_argument("BlockDeviceMapping.1.DeviceName", None)
         idx = 1
         while mapping:
             pre = "BlockDeviceMapping.%d" % idx
             dev_name = mapping
             block_dev_type = BlockDeviceType()
             block_dev_type.ephemeral_name = self.get_argument("%s.VirtualName" % pre, None)
             if not (block_dev_type.ephemeral_name):
                 block_dev_type.no_device = self.get_argument("%s.NoDevice" % pre, "") == "true"
                 block_dev_type.snapshot_id = self.get_argument("%s.Ebs.SnapshotId" % pre, None)
                 block_dev_type.size = self.get_argument("%s.Ebs.VolumeSize" % pre, None)
                 block_dev_type.delete_on_termination = (
                     self.get_argument("%s.Ebs.DeleteOnTermination" % pre, "") == "true"
                 )
             bdm[dev_name] = block_dev_type
             idx += 1
             mapping = self.get_argument("BlockDeviceMapping.%d.DeviceName" % idx, None)
         if snapshot_id:
             rootbdm = BlockDeviceType()
             rootbdm.snapshot_id = snapshot_id
             bdm["/dev/sda1"] = rootbdm
         if len(bdm) == 0:
             bdm = None
         return clc.register_image(
             name, image_location, description, architecture, kernel_id, ramdisk_id, root_dev_name, bdm, callback
         )
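For reference, the RegisterImage branch above consumes numbered BlockDeviceMapping.N.* query parameters. Below is a minimal standalone sketch (not part of the handler; the parameter names follow the EC2 query API, the values and the plain dict standing in for the request are made up) of how that loop turns them into a boto BlockDeviceMapping:

from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

# Hypothetical request parameters; names follow the EC2 query API, values are illustrative.
params = {
    "BlockDeviceMapping.1.DeviceName": "/dev/sdb",
    "BlockDeviceMapping.1.VirtualName": "ephemeral0",
    "BlockDeviceMapping.2.DeviceName": "/dev/sdf",
    "BlockDeviceMapping.2.Ebs.SnapshotId": "snap-1234abcd",
    "BlockDeviceMapping.2.Ebs.VolumeSize": "100",
    "BlockDeviceMapping.2.Ebs.DeleteOnTermination": "true",
}

bdm = BlockDeviceMapping()
idx = 1
while ("BlockDeviceMapping.%d.DeviceName" % idx) in params:
    pre = "BlockDeviceMapping.%d" % idx
    bdt = BlockDeviceType()
    bdt.ephemeral_name = params.get("%s.VirtualName" % pre)
    if not bdt.ephemeral_name:
        bdt.snapshot_id = params.get("%s.Ebs.SnapshotId" % pre)
        bdt.size = params.get("%s.Ebs.VolumeSize" % pre)
        bdt.delete_on_termination = params.get("%s.Ebs.DeleteOnTermination" % pre, "") == "true"
    bdm[params["%s.DeviceName" % pre]] = bdt
    idx += 1

# bdm now maps '/dev/sdb' to an ephemeral disk and '/dev/sdf' to a snapshot-backed EBS volume.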
Exemple #42
0
    def handleRunInstances(self, action, clc, user_data_file, callback):
        image_id = self.get_argument("ImageId")
        min = self.get_argument("MinCount", "1")
        max = self.get_argument("MaxCount", "1")
        key = self.get_argument("KeyName", None)
        groups = self.get_argument_list("SecurityGroup")
        sec_group_ids = self.get_argument_list("SecurityGroupId")
        if user_data_file:
            user_data = user_data_file
        else:
            user_data = self.get_argument("UserData", "")
            user_data = base64.b64decode(user_data)
        addr_type = self.get_argument("AddressingType", None)
        vm_type = self.get_argument("InstanceType", None)
        placement = self.get_argument("Placement.AvailabilityZone", None)
        placement_group = self.get_argument("Placement.GroupName", None)
        tenancy = self.get_argument("Placement.Tenancy", None)
        kernel = self.get_argument("KernelId", None)
        ramdisk = self.get_argument("RamdiskId", None)
        monitoring = False
        if self.get_argument("Monitoring.Enabled", "") == "true":
            monitoring = True
        subnet = self.get_argument("SubnetId", None)
        private_ip = self.get_argument("PrivateIpAddress", None)
        # get block device mappings
        bdm = BlockDeviceMapping()
        mapping = self.get_argument("BlockDeviceMapping.1.DeviceName", None)
        idx = 1
        while mapping:
            pre = "BlockDeviceMapping.%d" % idx
            dev_name = mapping
            block_dev_type = BlockDeviceType()
            block_dev_type.ephemeral_name = self.get_argument("%s.VirtualName" % pre, None)
            if not (block_dev_type.ephemeral_name):
                block_dev_type.no_device = self.get_argument("%s.NoDevice" % pre, "") == "true"
                block_dev_type.snapshot_id = self.get_argument("%s.Ebs.SnapshotId" % pre, None)
                block_dev_type.size = self.get_argument("%s.Ebs.VolumeSize" % pre, None)
                block_dev_type.delete_on_termination = (
                    self.get_argument("%s.Ebs.DeleteOnTermination" % pre, "") == "true"
                )
            bdm[dev_name] = block_dev_type
            idx += 1
            mapping = self.get_argument("BlockDeviceMapping.%d.DeviceName" % idx, None)
        if len(bdm) == 0:
            bdm = None

        api_termination = False
        if self.get_argument("DisableApiTermination", "") == "true":
            api_termination = True
        instance_shutdown = False
        if self.get_argument("InstanceInitiatedShutdownBehavior", "") == "true":
            instance_shutdown = True
        token = self.get_argument("ClientToken", None)
        addition_info = self.get_argument("AdditionInfo", None)
        instance_profile_name = self.get_argument("IamInstanceProfile.Name", None)
        instance_profile_arn = self.get_argument("IamInstanceProfile.Arn", None)

        return clc.run_instances(
            image_id,
            min_count=min,
            max_count=max,
            key_name=key,
            security_groups=groups,
            user_data=user_data,
            addressing_type=addr_type,
            instance_type=vm_type,
            placement=placement,
            kernel_id=kernel,
            ramdisk_id=ramdisk,
            monitoring_enabled=monitoring,
            subnet_id=subnet,
            block_device_map=bdm,
            disable_api_termination=api_termination,
            instance_initiated_shutdown_behavior=instance_shutdown,
            private_ip_address=private_ip,
            placement_group=placement_group,
            client_token=token,
            security_group_ids=sec_group_ids,
            additional_info=addition_info,
            instance_profile_name=instance_profile_name,
            instance_profile_arn=instance_profile_arn,
            tenancy=tenancy,
            callback=callback,
        )
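The handlers above also rely on self.get_argument_list(), which is not a standard Tornado method. A plausible sketch, assuming it simply collects the EC2 query API's numbered list parameters (SecurityGroupId.1, SecurityGroupId.2, ...):

    def get_argument_list(self, name):
        # Collect numbered query arguments (e.g. SecurityGroupId.1, SecurityGroupId.2, ...)
        # until one is missing; this mirrors how the EC2 query API encodes lists.
        values = []
        idx = 1
        value = self.get_argument("%s.%d" % (name, idx), None)
        while value is not None:
            values.append(value)
            idx += 1
            value = self.get_argument("%s.%d" % (name, idx), None)
        return values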
def create_cluster(conn, args):
    if args.identity_file is None:
        print("ERROR: Must provide an identity file (-i) for ssh connections.",
              file=stderr)
        sys.exit(1)

    if args.key_pair is None:
        print("ERROR: Must provide a key pair name (-k) to use on instances.",
              file=stderr)
        sys.exit(1)

    # make or get the security group.
    security_group = get_or_make_group(conn, args.name, args.vpc_id)

    # set the inbound permission rules
    if len(security_group.rules) == 0:
        if args.vpc_id is None:
            security_group.authorize(src_group=security_group)
        else:
            security_group.authorize('tcp', 22, 22, args.authorized_address)
            security_group.authorize('tcp', 8888, 8888, args.authorized_address)
            security_group.authorize('tcp', 7000, 7000, args.authorized_address)
            security_group.authorize('tcp', 7001, 7001, args.authorized_address)
            security_group.authorize('tcp', 7199, 7199, args.authorized_address)
            security_group.authorize('tcp', 9042, 9042, args.authorized_address)
            security_group.authorize('tcp', 9160, 9160, args.authorized_address)
    else:
        print("Security group already exists, skipping creation.")

    instances = cluster_nodes(conn, args.name)
    if any(instances):
        additional_tags = {}
        for i in instances:
            i.add_tags(
                dict(additional_tags,
                     Name="{cn}-node-{iid}".format(cn=args.name, iid=i.id)))
        return instances
    else:
        print(
            "Launching {m} instances for cluster...".format(m=args.node_count))

        try:
            image = conn.get_all_images(image_ids=args.ami)[0]

            block_map = BlockDeviceMapping()
            if args.ebs_vol_size > 0:
                if args.instance_type.startswith('m3.'):
                    for i in range(get_num_disks(args.instance_type)):
                        device = BlockDeviceType()
                        device.ephemeral_name = "ephemeral%d" % i
                        name = "/dev/sd" + string.ascii_letters[i + 1]
                        block_map[name] = device

                else:
                    device = EBSBlockDeviceType()
                    device.size = args.ebs_vol_size
                    device.volume_type = args.ebs_vol_type
                    device.delete_on_termination = True
                    key = "/dev/sd" + chr(ord('s') + 1)
                    block_map[key] = device

            nodes = image.run(key_name=args.key_pair,
                              security_group_ids=[security_group.id],
                              instance_type="",
                              placement=args.zone,
                              min_count=args.node_count,
                              max_count=args.node_count,
                              block_device_map=block_map,
                              subnet_id=None,
                              placement_group=None,
                              user_data=None,
                              instance_initiated_shutdown_behavior="stop",
                              instance_profile_name=None)

            print("Waiting for AWS to propagate instance metadata...")
            time.sleep(15)

            additional_tags = {}
            for node in nodes.instances:
                node.add_tags(
                    dict(additional_tags,
                         Name="{cn}-node-{iid}".format(cn=args.name,
                                                       iid=node.id)))

            return nodes.instances

        except Exception as e:
            print("Caught exception: ", e)
            print("ERROR: Could not find AMI " + args.ami, file=stderr)
            sys.exit(1)
Exemple #44
0
parser.add_argument('--server', help='Packaging server to use', default=None)
parser.add_argument('--instance-type', help='Instance Type', default='m3.xlarge')
parser.add_argument('--version', help='Version number to build', required=True)
args = parser.parse_args()

os.environ['AWS_ACCESS_KEY_ID'] = AWS_ACCESS_KEY_ID
os.environ['AWS_SECRET_ACCESS_KEY'] = AWS_SECRET_ACCESS_KEY

if args.server:
  print("Using server %s\n" % args.server)
  env.hosts = [args.server]
else:
  print("Creating server\n")
  conn = boto.ec2.connect_to_region(region)
  dev_sdb = BlockDeviceType()
  dev_sdb.ephemeral_name = 'ephemeral0' 
  bdm = BlockDeviceMapping()
  bdm['/dev/sdb'] = dev_sdb
  # bdm = boto.ec2.blockdevicemapping.BlockDeviceMapping({'/dev/xvdb': 'ephemeral0', '/dev/xvdc': 'ephemeral1'})

  res = conn.run_instances(
    ami_id,
    key_name = key_name,
    instance_type = args.instance_type,
    security_groups=[security_group],
    block_device_map = bdm)

  timeout = 60

  inst = res.instances[0]
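The snippet stops right after grabbing the first instance from the reservation. A minimal sketch of the usual next step, polling with boto's Instance.update() until the instance is running or the timeout above elapses (assumes inst and timeout from the code above):

  import time

  # Poll until the instance reports 'running' or the timeout elapses (sketch only).
  deadline = time.time() + timeout
  while inst.update() != 'running':
    if time.time() > deadline:
      raise RuntimeError("instance %s did not come up within %d seconds" % (inst.id, timeout))
    time.sleep(5)
  print("Instance %s is running at %s" % (inst.id, inst.public_dns_name))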
Exemple #45
0
    def handleRunInstances(self, action, clc, user_data_file):
        image_id = self.get_argument('ImageId')
        min = self.get_argument('MinCount', '1')
        max = self.get_argument('MaxCount', '1')
        key = self.get_argument('KeyName', None)
        groups = self.get_argument_list('SecurityGroup')
        sec_group_ids = self.get_argument_list('SecurityGroupId')
        if user_data_file:
            user_data = user_data_file
        else:
            user_data = self.get_argument('UserData', None)
        addr_type = self.get_argument('AddressingType', None)
        vm_type = self.get_argument('InstanceType', None)
        placement = self.get_argument('Placement.AvailabilityZone', None)
        placement_group = self.get_argument('Placement.GroupName', None)
        tenancy = self.get_argument('Placement.Tenancy', None)
        kernel = self.get_argument('KernelId', None)
        ramdisk = self.get_argument('RamdiskId', None)
        monitoring = False
        if self.get_argument('Monitoring.Enabled', '') == 'true':
            monitoring = True
        subnet = self.get_argument('SubnetId', None)
        private_ip = self.get_argument('PrivateIpAddress', None)
        # get block device mappings
        bdm = BlockDeviceMapping()
        mapping = self.get_argument('BlockDeviceMapping.1.DeviceName', None)
        idx = 1
        while mapping:
            pre = 'BlockDeviceMapping.%d' % idx
            dev_name = mapping
            block_dev_type = BlockDeviceType()
            block_dev_type.ephemeral_name = self.get_argument(
                '%s.VirtualName' % pre, None)
            if not (block_dev_type.ephemeral_name):
                block_dev_type.no_device = \
                    (self.get_argument('%s.NoDevice' % pre, '') == 'true')
                block_dev_type.snapshot_id = \
                        self.get_argument('%s.Ebs.SnapshotId' % pre, None)
                block_dev_type.size = \
                        self.get_argument('%s.Ebs.VolumeSize' % pre, None)
                block_dev_type.delete_on_termination = \
                        (self.get_argument('%s.Ebs.DeleteOnTermination' % pre, '') == 'true')
            bdm[dev_name] = block_dev_type
            idx += 1
            mapping = self.get_argument(
                'BlockDeviceMapping.%d.DeviceName' % idx, None)
        if len(bdm) == 0:
            bdm = None

        api_termination = False
        if self.get_argument('DisableApiTermination', '') == 'true':
            api_termination = True
        instance_shutdown = False
        if self.get_argument('InstanceInitiatedShutdownBehavior',
                             '') == 'true':
            instance_shutdown = True
        token = self.get_argument('ClientToken', None)
        addition_info = self.get_argument('AdditionInfo', None)
        instance_profile_name = self.get_argument('IamInstanceProfile.Name',
                                                  None)
        instance_profile_arn = self.get_argument('IamInstanceProfile.Arn',
                                                 None)

        return self.__normalize_instances__([
            clc.run_instances(
                image_id,
                min_count=min,
                max_count=max,
                key_name=key,
                security_groups=groups,
                user_data=user_data,
                addressing_type=addr_type,
                instance_type=vm_type,
                placement=placement,
                kernel_id=kernel,
                ramdisk_id=ramdisk,
                monitoring_enabled=monitoring,
                subnet_id=subnet,
                block_device_map=bdm,
                disable_api_termination=api_termination,
                instance_initiated_shutdown_behavior=instance_shutdown,
                private_ip_address=private_ip,
                placement_group=placement_group,
                client_token=token,
                security_group_ids=sec_group_ids,
                additional_info=addition_info,
                instance_profile_name=instance_profile_name,
                instance_profile_arn=instance_profile_arn,
                tenancy=tenancy)
        ])
Exemple #46
0
def launch_cluster(conn, opts, cluster_name):
    if opts.identity_file is None:
        print("ERROR: Must provide an identity file (-i) for ssh connections.", file=stderr)
        sys.exit(1)

    if opts.key_pair is None:
        print("ERROR: Must provide a key pair name (-k) to use on instances.", file=stderr)
        sys.exit(1)

    user_data_content = None
    if opts.user_data:
        with open(opts.user_data) as user_data_file:
            user_data_content = user_data_file.read()

    print("Setting up security groups...")
    master_group = get_or_make_group(conn, cluster_name + "-master", opts.vpc_id)
    slave_group = get_or_make_group(conn, cluster_name + "-slaves", opts.vpc_id)
    authorized_address = opts.authorized_address
    if master_group.rules == []:  # Group was just now created
        if opts.vpc_id is None:
            master_group.authorize(src_group=master_group)
            master_group.authorize(src_group=slave_group)
        else:
            master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                   src_group=master_group)
            master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                   src_group=master_group)
            master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                   src_group=master_group)
            master_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                   src_group=slave_group)
            master_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                   src_group=slave_group)
            master_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                   src_group=slave_group)
        master_group.authorize('tcp', 22, 22, authorized_address)
        master_group.authorize('tcp', 8080, 8081, authorized_address)
        master_group.authorize('tcp', 18080, 18080, authorized_address)
        master_group.authorize('tcp', 19999, 19999, authorized_address)
        master_group.authorize('tcp', 50030, 50030, authorized_address)
        master_group.authorize('tcp', 50070, 50070, authorized_address)
        master_group.authorize('tcp', 60070, 60070, authorized_address)
        master_group.authorize('tcp', 4040, 4045, authorized_address)
        # Rstudio (GUI for R) needs port 8787 for web access
        master_group.authorize('tcp', 8787, 8787, authorized_address)
        # HDFS NFS gateway requires 111,2049,4242 for tcp & udp
        master_group.authorize('tcp', 111, 111, authorized_address)
        master_group.authorize('udp', 111, 111, authorized_address)
        master_group.authorize('tcp', 2049, 2049, authorized_address)
        master_group.authorize('udp', 2049, 2049, authorized_address)
        master_group.authorize('tcp', 4242, 4242, authorized_address)
        master_group.authorize('udp', 4242, 4242, authorized_address)
        # RM in YARN mode uses 8088
        master_group.authorize('tcp', 8088, 8088, authorized_address)
        if opts.ganglia:
            master_group.authorize('tcp', 5080, 5080, authorized_address)
    if slave_group.rules == []:  # Group was just now created
        if opts.vpc_id is None:
            slave_group.authorize(src_group=master_group)
            slave_group.authorize(src_group=slave_group)
        else:
            slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                  src_group=master_group)
            slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                  src_group=master_group)
            slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                  src_group=master_group)
            slave_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1,
                                  src_group=slave_group)
            slave_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535,
                                  src_group=slave_group)
            slave_group.authorize(ip_protocol='udp', from_port=0, to_port=65535,
                                  src_group=slave_group)
        slave_group.authorize('tcp', 22, 22, authorized_address)
        slave_group.authorize('tcp', 8080, 8081, authorized_address)
        slave_group.authorize('tcp', 50060, 50060, authorized_address)
        slave_group.authorize('tcp', 50075, 50075, authorized_address)
        slave_group.authorize('tcp', 60060, 60060, authorized_address)
        slave_group.authorize('tcp', 60075, 60075, authorized_address)
#Kylix
        slave_group.authorize(ip_protocol='tcp', from_port=50050, to_port=50060,
                               src_group=slave_group)
        slave_group.authorize(ip_protocol='udp', from_port=50050, to_port=50060,
                               src_group=slave_group)
        slave_group.authorize(ip_protocol='tcp', from_port=50050, to_port=50060,
                               src_group=master_group)
        slave_group.authorize(ip_protocol='udp', from_port=50050, to_port=50060,
                               src_group=master_group)
        master_group.authorize(ip_protocol='tcp', from_port=50050, to_port=50060,
                               src_group=slave_group)
        master_group.authorize(ip_protocol='udp', from_port=50050, to_port=50060,
                               src_group=slave_group)


    # Check if instances are already running in our groups
    existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
                                                             die_on_error=False)
    if existing_slaves or (existing_masters and not opts.use_existing_master):
        print("ERROR: There are already instances running in group %s or %s" %
              (master_group.name, slave_group.name), file=stderr)
        sys.exit(1)

    # we use group ids to work around https://github.com/boto/boto/issues/350
    additional_group_ids = []
    if opts.additional_security_group:
        additional_group_ids = [sg.id
                                for sg in conn.get_all_security_groups()
                                if opts.additional_security_group in (sg.name, sg.id)]
    print("Launching instances...")

    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        print("Could not find AMI " + opts.ami, file=stderr)
        sys.exit(1)

    # Create block device mapping so that we can add EBS volumes if asked to.
    # The first drive is attached as /dev/sds, 2nd as /dev/sdt, ... /dev/sdz
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        for i in range(opts.ebs_vol_num):
            device = EBSBlockDeviceType()
            device.size = opts.ebs_vol_size
            device.volume_type = opts.ebs_vol_type
            device.delete_on_termination = True
            block_map["/dev/sd" + chr(ord('s') + i)] = device

    # AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
    if opts.instance_type.startswith('m3.'):
        for i in range(get_num_disks(opts.instance_type)):
            dev = BlockDeviceType()
            dev.ephemeral_name = 'ephemeral%d' % i
            # The first ephemeral drive is /dev/sdb.
            name = '/dev/sd' + string.ascii_letters[i + 1]
            block_map[name] = dev

    # Launch slaves
    if opts.spot_price is not None:
        # Launch spot instances with the requested price
        print("Requesting %d slaves as spot instances with price $%.3f" %
              (opts.slaves, opts.spot_price))
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        my_req_ids = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            slave_reqs = conn.request_spot_instances(
                price=opts.spot_price,
                image_id=opts.ami,
                launch_group="launch-group-%s" % cluster_name,
                placement=zone,
                count=num_slaves_this_zone,
                key_name=opts.key_pair,
                security_group_ids=[slave_group.id] + additional_group_ids,
                instance_type=opts.instance_type,
                block_device_map=block_map,
                subnet_id=opts.subnet_id,
                placement_group=opts.placement_group,
                user_data=user_data_content,
                instance_profile_name=opts.instance_profile_name)
            my_req_ids += [req.id for req in slave_reqs]
            i += 1

        print("Waiting for spot instances to be granted...")
        try:
            while True:
                time.sleep(10)
                reqs = conn.get_all_spot_instance_requests()
                id_to_req = {}
                for r in reqs:
                    id_to_req[r.id] = r
                active_instance_ids = []
                for i in my_req_ids:
                    if i in id_to_req and id_to_req[i].state == "active":
                        active_instance_ids.append(id_to_req[i].instance_id)
                if len(active_instance_ids) == opts.slaves:
                    print("All %d slaves granted" % opts.slaves)
                    reservations = conn.get_all_reservations(active_instance_ids)
                    slave_nodes = []
                    for r in reservations:
                        slave_nodes += r.instances
                    break
                else:
                    print("%d of %d slaves granted, waiting longer" % (
                        len(active_instance_ids), opts.slaves))
        except:
            print("Canceling spot instance requests")
            conn.cancel_spot_instance_requests(my_req_ids)
            # Log a warning if any of these requests actually launched instances:
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            running = len(master_nodes) + len(slave_nodes)
            if running:
                print(("WARNING: %d instances are still running" % running), file=stderr)
            sys.exit(0)
    else:
        # Launch non-spot instances
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        slave_nodes = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            if num_slaves_this_zone > 0:
                slave_res = image.run(
                    key_name=opts.key_pair,
                    security_group_ids=[slave_group.id] + additional_group_ids,
                    instance_type=opts.instance_type,
                    placement=zone,
                    min_count=num_slaves_this_zone,
                    max_count=num_slaves_this_zone,
                    block_device_map=block_map,
                    subnet_id=opts.subnet_id,
                    placement_group=opts.placement_group,
                    user_data=user_data_content,
                    instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
                    instance_profile_name=opts.instance_profile_name)
                slave_nodes += slave_res.instances
                print("Launched {s} slave{plural_s} in {z}, regid = {r}".format(
                      s=num_slaves_this_zone,
                      plural_s=('' if num_slaves_this_zone == 1 else 's'),
                      z=zone,
                      r=slave_res.id))
            i += 1

    # Launch or resume masters
    if existing_masters:
        print("Starting master...")
        for inst in existing_masters:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        master_nodes = existing_masters
    else:
        master_type = opts.master_instance_type
        if master_type == "":
            master_type = opts.instance_type
        if opts.zone == 'all':
            opts.zone = random.choice(conn.get_all_zones()).name
        master_res = image.run(
            key_name=opts.key_pair,
            security_group_ids=[master_group.id] + additional_group_ids,
            instance_type=master_type,
            placement=opts.zone,
            min_count=1,
            max_count=1,
            block_device_map=block_map,
            subnet_id=opts.subnet_id,
            placement_group=opts.placement_group,
            user_data=user_data_content,
            instance_initiated_shutdown_behavior=opts.instance_initiated_shutdown_behavior,
            instance_profile_name=opts.instance_profile_name)

        master_nodes = master_res.instances
        print("Launched master in %s, regid = %s" % (zone, master_res.id))

    # This wait time corresponds to SPARK-4983
    print("Waiting for AWS to propagate instance metadata...")
    time.sleep(15)

    # Give the instances descriptive names and set additional tags
    additional_tags = {}
    if opts.additional_tags.strip():
        additional_tags = dict(
            map(str.strip, tag.split(':', 1)) for tag in opts.additional_tags.split(',')
        )

    for master in master_nodes:
        master.add_tags(
            dict(additional_tags, Name='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
        )

    for slave in slave_nodes:
        slave.add_tags(
            dict(additional_tags, Name='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))
        )

    # Return all the instances
    return (master_nodes, slave_nodes)
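launch_cluster() above calls get_partition() to decide how many slaves go into each availability zone, but the helper itself is not shown. A plausible sketch that spreads the requested count as evenly as possible across the zones:

def get_partition(total, num_partitions, current_partition):
    # Give each partition total // num_partitions instances, then hand the remainder
    # out one at a time to the first few partitions.
    num_this_partition = total // num_partitions
    if (total % num_partitions) - current_partition > 0:
        num_this_partition += 1
    return num_this_partition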
Exemple #47
0
def launch_cluster(conn, opts, cluster_name):
    if opts.identity_file is None:
        print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
        sys.exit(1)
    if opts.key_pair is None:
        print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
        sys.exit(1)

    user_data_content = None
    if opts.user_data:
        with open(opts.user_data) as user_data_file:
            user_data_content = user_data_file.read()

    print "Setting up security groups..."
    master_group = get_or_make_group(conn, cluster_name + "-master")
    slave_group = get_or_make_group(conn, cluster_name + "-slaves")
    if master_group.rules == []:  # Group was just now created
        master_group.authorize(src_group=master_group)
        master_group.authorize(src_group=slave_group)
        master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        master_group.authorize('tcp', 18080, 18080, '0.0.0.0/0')
        master_group.authorize('tcp', 19999, 19999, '0.0.0.0/0')
        master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
        master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
        master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
        master_group.authorize('tcp', 4040, 4045, '0.0.0.0/0')
        if opts.ganglia:
            master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0')
    if slave_group.rules == []:  # Group was just now created
        slave_group.authorize(src_group=master_group)
        slave_group.authorize(src_group=slave_group)
        slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
        slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
        slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
        slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')

    # Check if instances are already running in our groups
    existing_masters, existing_slaves = get_existing_cluster(conn, opts, cluster_name,
                                                             die_on_error=False)
    if existing_slaves or (existing_masters and not opts.use_existing_master):
        print >> stderr, ("ERROR: There are already instances running in " +
                          "group %s or %s" % (master_group.name, slave_group.name))
        sys.exit(1)

    # Figure out Spark AMI
    if opts.ami is None:
        opts.ami = get_spark_ami(opts)
    print "Launching instances..."

    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        print >> stderr, "Could not find AMI " + opts.ami
        sys.exit(1)

    # Create block device mapping so that we can add an EBS volume if asked to
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        device = EBSBlockDeviceType()
        device.size = opts.ebs_vol_size
        device.delete_on_termination = True
        block_map["/dev/sdv"] = device

    # AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
    if opts.instance_type.startswith('m3.'):
        for i in range(get_num_disks(opts.instance_type)):
            dev = BlockDeviceType()
            dev.ephemeral_name = 'ephemeral%d' % i
            # The first ephemeral drive is /dev/sdb.
            name = '/dev/sd' + string.letters[i + 1]
            block_map[name] = dev

    # Launch slaves
    if opts.spot_price is not None:
        # Launch spot instances with the requested price
        print ("Requesting %d slaves as spot instances with price $%.3f" %
               (opts.slaves, opts.spot_price))
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        my_req_ids = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            slave_reqs = conn.request_spot_instances(
                price=opts.spot_price,
                image_id=opts.ami,
                launch_group="launch-group-%s" % cluster_name,
                placement=zone,
                count=num_slaves_this_zone,
                key_name=opts.key_pair,
                security_groups=[slave_group],
                instance_type=opts.instance_type,
                block_device_map=block_map,
                user_data=user_data_content)
            my_req_ids += [req.id for req in slave_reqs]
            i += 1

        print "Waiting for spot instances to be granted..."
        try:
            while True:
                time.sleep(10)
                reqs = conn.get_all_spot_instance_requests()
                id_to_req = {}
                for r in reqs:
                    id_to_req[r.id] = r
                active_instance_ids = []
                for i in my_req_ids:
                    if i in id_to_req and id_to_req[i].state == "active":
                        active_instance_ids.append(id_to_req[i].instance_id)
                if len(active_instance_ids) == opts.slaves:
                    print "All %d slaves granted" % opts.slaves
                    reservations = conn.get_all_instances(active_instance_ids)
                    slave_nodes = []
                    for r in reservations:
                        slave_nodes += r.instances
                    break
                else:
                    print "%d of %d slaves granted, waiting longer" % (
                        len(active_instance_ids), opts.slaves)
        except:
            print "Canceling spot instance requests"
            conn.cancel_spot_instance_requests(my_req_ids)
            # Log a warning if any of these requests actually launched instances:
            (master_nodes, slave_nodes) = get_existing_cluster(
                conn, opts, cluster_name, die_on_error=False)
            running = len(master_nodes) + len(slave_nodes)
            if running:
                print >> stderr, ("WARNING: %d instances are still running" % running)
            sys.exit(0)
    else:
        # Launch non-spot instances
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        slave_nodes = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            if num_slaves_this_zone > 0:
                slave_res = image.run(key_name=opts.key_pair,
                                      security_groups=[slave_group],
                                      instance_type=opts.instance_type,
                                      placement=zone,
                                      min_count=num_slaves_this_zone,
                                      max_count=num_slaves_this_zone,
                                      block_device_map=block_map,
                                      user_data=user_data_content)
                slave_nodes += slave_res.instances
                print "Launched %d slaves in %s, regid = %s" % (num_slaves_this_zone,
                                                                zone, slave_res.id)
            i += 1

    # Launch or resume masters
    if existing_masters:
        print "Starting master..."
        for inst in existing_masters:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        master_nodes = existing_masters
    else:
        master_type = opts.master_instance_type
        if master_type == "":
            master_type = opts.instance_type
        if opts.zone == 'all':
            opts.zone = random.choice(conn.get_all_zones()).name
        master_res = image.run(key_name=opts.key_pair,
                               security_groups=[master_group],
                               instance_type=master_type,
                               placement=opts.zone,
                               min_count=1,
                               max_count=1,
                               block_device_map=block_map,
                               user_data=user_data_content)
        master_nodes = master_res.instances
        print "Launched master in %s, regid = %s" % (zone, master_res.id)

    # Give the instances descriptive names
    for master in master_nodes:
        master.add_tag(
            key='Name',
            value='{cn}-master-{iid}'.format(cn=cluster_name, iid=master.id))
    for slave in slave_nodes:
        slave.add_tag(
            key='Name',
            value='{cn}-slave-{iid}'.format(cn=cluster_name, iid=slave.id))

    # Return all the instances
    return (master_nodes, slave_nodes)
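The launch_cluster() variants also depend on get_num_disks() to know how many instance-store (ephemeral) disks to map for M3 instances. The real helper is a large lookup table; a partial sketch covering just the family used in these snippets (disk counts taken from the M3 instance specifications):

def get_num_disks(instance_type):
    # Partial table of instance-store disk counts; the full helper covers many more types.
    disks_by_instance = {
        "m3.medium": 1,
        "m3.large": 1,
        "m3.xlarge": 2,
        "m3.2xlarge": 2,
    }
    return disks_by_instance.get(instance_type, 1)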
Exemple #48
0
import boto
ec2conn = boto.connect_ec2()
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
map = BlockDeviceMapping()
sdb1 = BlockDeviceType()
sdc1 = BlockDeviceType()
sdd1 = BlockDeviceType()
sde1 = BlockDeviceType()
sdb1.ephemeral_name = 'ephemeral0'
sdc1.ephemeral_name = 'ephemeral1'
sdd1.ephemeral_name = 'ephemeral2'
sde1.ephemeral_name = 'ephemeral3'
map['/dev/sdb1'] = sdb1
map['/dev/sdc1'] = sdc1
map['/dev/sdd1'] = sdd1
map['/dev/sde1'] = sde1
img = ec2conn.get_all_images(image_ids=['ami-f61dfd9f'])[0]
img.run(key_name='id_bv-keypair',
        instance_type='c1.xlarge',
        block_device_map=map)
def test_create_launch_configuration_with_block_device_mappings():
    block_device_mapping = BlockDeviceMapping()

    ephemeral_drive = BlockDeviceType()
    ephemeral_drive.ephemeral_name = "ephemeral0"
    block_device_mapping["/dev/xvdb"] = ephemeral_drive

    snapshot_drive = BlockDeviceType()
    snapshot_drive.snapshot_id = "snap-1234abcd"
    snapshot_drive.volume_type = "standard"
    block_device_mapping["/dev/xvdp"] = snapshot_drive

    ebs_drive = BlockDeviceType()
    ebs_drive.volume_type = "io1"
    ebs_drive.size = 100
    ebs_drive.iops = 1000
    ebs_drive.delete_on_termination = False
    block_device_mapping["/dev/xvdh"] = ebs_drive

    conn = boto.connect_autoscale(use_block_device_types=True)
    config = LaunchConfiguration(
        name="tester",
        image_id="ami-abcd1234",
        instance_type="m1.small",
        key_name="the_keys",
        security_groups=["default", "default2"],
        user_data=b"This is some user_data",
        instance_monitoring=True,
        instance_profile_name="arn:aws:iam::{}:instance-profile/testing".format(
            ACCOUNT_ID
        ),
        spot_price=0.1,
        block_device_mappings=[block_device_mapping],
    )
    conn.create_launch_configuration(config)

    launch_config = conn.get_all_launch_configurations()[0]
    launch_config.name.should.equal("tester")
    launch_config.image_id.should.equal("ami-abcd1234")
    launch_config.instance_type.should.equal("m1.small")
    launch_config.key_name.should.equal("the_keys")
    set(launch_config.security_groups).should.equal(set(["default", "default2"]))
    launch_config.user_data.should.equal(b"This is some user_data")
    launch_config.instance_monitoring.enabled.should.equal("true")
    launch_config.instance_profile_name.should.equal(
        "arn:aws:iam::{}:instance-profile/testing".format(ACCOUNT_ID)
    )
    launch_config.spot_price.should.equal(0.1)
    len(launch_config.block_device_mappings).should.equal(3)

    returned_mapping = launch_config.block_device_mappings

    set(returned_mapping.keys()).should.equal(
        set(["/dev/xvdb", "/dev/xvdp", "/dev/xvdh"])
    )

    returned_mapping["/dev/xvdh"].iops.should.equal(1000)
    returned_mapping["/dev/xvdh"].size.should.equal(100)
    returned_mapping["/dev/xvdh"].volume_type.should.equal("io1")
    returned_mapping["/dev/xvdh"].delete_on_termination.should.be.false

    returned_mapping["/dev/xvdp"].snapshot_id.should.equal("snap-1234abcd")
    returned_mapping["/dev/xvdp"].volume_type.should.equal("standard")

    returned_mapping["/dev/xvdb"].ephemeral_name.should.equal("ephemeral0")
def create_instance(name, config, region, key_name, ssh_key, instance_data,
                    deploypass, loaned_to, loan_bug, create_ami,
                    ignore_subnet_check, max_attempts):
    """Creates an AMI instance with the given name and config. The config must
    specify things like ami id."""
    conn = get_aws_connection(region)
    # Make sure we don't request the same things twice
    token = str(uuid.uuid4())[:16]

    instance_data = instance_data.copy()
    instance_data['name'] = name
    instance_data['domain'] = config['domain']
    instance_data['hostname'] = '{name}.{domain}'.format(
        name=name, domain=config['domain'])

    ami = conn.get_all_images(image_ids=[config["ami"]])[0]
    bdm = None
    if 'device_map' in config:
        bdm = BlockDeviceMapping()
        for device, device_info in config['device_map'].items():
            bd = BlockDeviceType()
            if device_info.get('size'):
                bd.size = device_info['size']
            # Overwrite root device size for HVM instances, since they cannot
            # be resized online
            if ami.virtualization_type == "hvm" and \
                    ami.root_device_name == device:
                bd.size = ami.block_device_mapping[ami.root_device_name].size
            if device_info.get("delete_on_termination") is not False:
                bd.delete_on_termination = True
            if device_info.get("ephemeral_name"):
                bd.ephemeral_name = device_info["ephemeral_name"]

            bdm[device] = bd

    interfaces = make_instance_interfaces(region, instance_data['hostname'],
                                          ignore_subnet_check,
                                          config.get('subnet_ids'),
                                          config.get('security_group_ids', []),
                                          config.get("use_public_ip"))

    keep_going, attempt = True, 1
    while keep_going:
        try:
            if 'user_data_file' in config:
                user_data = open(config['user_data_file']).read()
            else:
                user_data = get_user_data_tmpl(config['type'])
            if user_data:
                user_data = user_data.format(
                    puppet_server=instance_data.get('default_puppet_server'),
                    fqdn=instance_data['hostname'],
                    hostname=instance_data['name'],
                    domain=instance_data['domain'],
                    dns_search_domain=config.get('dns_search_domain'),
                    password=deploypass,
                    moz_instance_type=config['type'],
                    region_dns_atom=get_region_dns_atom(region),
                )

            reservation = conn.run_instances(
                image_id=config['ami'],
                key_name=key_name,
                instance_type=config['instance_type'],
                block_device_map=bdm,
                client_token=token,
                disable_api_termination=config.get('disable_api_termination'),
                user_data=user_data,
                instance_profile_name=config.get('instance_profile_name'),
                network_interfaces=interfaces,
            )
            break
        except boto.exception.BotoServerError:
            log.exception("Cannot start an instance")
        time.sleep(10)
        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance = reservation.instances[0]
    log.info("instance %s created, waiting to come up", instance)
    # Wait for the instance to come up
    wait_for_status(instance, "state", "running", "update")
    instance.add_tag('Name', name)
    instance.add_tag('FQDN', instance_data['hostname'])
    instance.add_tag('created',
                     time.strftime("%Y-%m-%d %H:%M:%S %Z", time.gmtime()))
    instance.add_tag('moz-type', config['type'])
    if loaned_to:
        instance.add_tag("moz-loaned-to", loaned_to)
    if loan_bug:
        instance.add_tag("moz-bug", loan_bug)

    log.info("assimilating %s", instance)
    instance.add_tag('moz-state', 'pending')

    keep_going, attempt = True, 1
    while keep_going:
        try:
            # Don't reboot if need to create ami
            reboot = not create_ami
            assimilate_instance(instance=instance,
                                config=config,
                                ssh_key=ssh_key,
                                instance_data=instance_data,
                                deploypass=deploypass,
                                reboot=reboot)
            break
        except NetworkError as e:
            # it takes a while for the machine to start/reboot so the
            # NetworkError exception is quite common, just log the error,
            # without the full stack trace
            log.warn(
                "cannot connect; instance may still be starting  %s (%s, %s) - %s,"
                "retrying in %d sec ...", instance_data['hostname'],
                instance.id, instance.private_ip_address, e, FAILURE_TIMEOUT)
            time.sleep(FAILURE_TIMEOUT)

        except:
            # any other exception
            log.warn(
                "problem assimilating %s (%s, %s), retrying in "
                "%d sec ...",
                instance_data['hostname'],
                instance.id,
                instance.private_ip_address,
                FAILURE_TIMEOUT,
                exc_info=True)
            time.sleep(FAILURE_TIMEOUT)
        if max_attempts:
            attempt += 1
            keep_going = max_attempts >= attempt

    instance.add_tag('moz-state', 'ready')
    if create_ami:
        ami_name = "spot-%s-%s" % (
            config['type'], time.strftime("%Y-%m-%d-%H-%M", time.gmtime()))
        log.info("Generating AMI %s", ami_name)
        ami_cleanup(mount_point="/", distro=config["distro"])
        root_bd = instance.block_device_mapping[instance.root_device_name]
        volume = instance.connection.get_all_volumes(
            volume_ids=[root_bd.volume_id])[0]
        # The instance has to be stopped to flush EBS caches
        instance.stop()
        wait_for_status(instance, 'state', 'stopped', 'update')
        ami = volume_to_ami(volume=volume,
                            ami_name=ami_name,
                            arch=instance.architecture,
                            virtualization_type=instance.virtualization_type,
                            kernel_id=instance.kernel,
                            root_device_name=instance.root_device_name,
                            tags=config["tags"])
        log.info("AMI %s (%s) is ready", ami_name, ami.id)
        log.warn("Terminating %s", instance)
        instance.terminate()
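create_instance() above leans on a wait_for_status() helper that is not included in the snippet. A minimal sketch matching the call sites wait_for_status(instance, "state", "running", "update") and wait_for_status(instance, 'state', 'stopped', 'update'):

import time

def wait_for_status(obj, attr, value, update_method, timeout=600, interval=10):
    # Repeatedly call obj.<update_method>() until obj.<attr> equals the expected value,
    # or give up after the timeout (sketch; the real helper is not shown above).
    deadline = time.time() + timeout
    while getattr(obj, attr) != value:
        if time.time() > deadline:
            raise RuntimeError("timed out waiting for %s to become %r" % (attr, value))
        time.sleep(interval)
        getattr(obj, update_method)()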
Exemple #51
0
def launch_cluster(conn, config):
  key_name = config.get('ec2', 'key_name')
  if not key_name:
    exit('ERROR - key.name is not set in zetten.props')

  cur_nodes = get_active_cluster(conn, config)
  if cur_nodes:
    exit('ERROR - There are already instances running for {0} cluster'.format(config.cluster_name))

  if isfile(config.hosts_path):
    exit("ERROR - A hosts file already exists at {0}.  Please delete before running launch again".format(config.hosts_path))

  print "Launching {0} cluster".format(config.cluster_name)

  vpc_id = None
  if config.has_option('ec2', 'vpc_id'):
    vpc_id = config.get('ec2', 'vpc_id')

  subnet_id = None
  if config.has_option('ec2', 'subnet_id'):
    subnet_id = config.get('ec2', 'subnet_id')

  security_group = get_or_make_group(conn, config.cluster_name + "-group", vpc_id)
  if security_group.rules == []: # Group was just now created
    if vpc_id is None:
      security_group.authorize(src_group=security_group)
    else:
      security_group.authorize(ip_protocol='icmp', from_port=-1, to_port=-1, src_group=security_group)
      security_group.authorize(ip_protocol='tcp', from_port=0, to_port=65535, src_group=security_group)
      security_group.authorize(ip_protocol='udp', from_port=0, to_port=65535, src_group=security_group)
    security_group.authorize('tcp', 22, 22, '0.0.0.0/0')

  instance_d = {}
  for (hostname, services) in config.nodes().items():

    if 'worker' in services:
      instance_type = config.get('ec2', 'worker_instance_type')
      num_ephemeral = config.worker_num_ephemeral()
    else:
      instance_type = config.get('ec2', 'default_instance_type')
      num_ephemeral = config.default_num_ephemeral()

    if config.has_option('ec2', 'aws_ami'):
      host_ami = config.get('ec2', 'aws_ami')
    else:
      host_ami = config.get_image_id(instance_type)

    if not host_ami:
      exit('ERROR - Image not found for instance type: '+instance_type)

    bdm = BlockDeviceMapping()
    bdm['/dev/sda1'] = BlockDeviceType(delete_on_termination=True)

    for i in range(0, num_ephemeral):
      bdt = BlockDeviceType()
      bdt.ephemeral_name=config.ephemeral_root + str(i)
      bdm[config.device_root + chr(ord('b') + i)] = bdt

    try:
      resv = conn.run_instances(key_name=key_name,
                                image_id=host_ami,
                                security_group_ids=[security_group.id],
                                instance_type=instance_type,
                                subnet_id=subnet_id,
                                min_count=1,
                                max_count=1,
                                block_device_map=bdm)
    except EC2ResponseError as e:
      ami_help = """PLEASE NOTE - If you have accepted the software terms for CentOS 7 and still get an error, 
this could be due to CentOS releasing new images of CentOS 7.  When this occurs, the old images 
are no longer available to new users.  If you think this is the case, go to the CentOS 7 product
page on AWS Marketplace at the URL below to find the latest AMI: 

https://aws.amazon.com/marketplace/ordering?productId=b7ee8a69-ee97-4a49-9e68-afaee216db2e

On the product page, click 'Manual Launch' to find the latest AMI ID for your EC2 region.
This should be used to set the 'aws_ami' property in your zetten.props which will override
the default AMI IDs used by Zetten.  After setting the 'aws_ami' property, run the launch 
command again.

Also, let us know that this has occured by creating an issue on the Zetten's GitHub page 
and we'll upgrade the defaults AMIs used by Zetten to be the latest CentOS images.
"""
      exit("ERROR - Failed to launch EC2 instance due to exception below:\n\n{0}\n\n{1}".format(e, ami_help))
  
    if len(resv.instances) != 1:
      exit('ERROR - Failed to start {0} node'.format(hostname))

    instance = resv.instances[0]

    instance_d[hostname] = instance.id
    print 'Launching {0} node using {1}'.format(hostname, host_ami)
 

  while True:
    time.sleep(5)

    nodes = get_cluster(conn, config, ['running'])
    num_actual = len(nodes)
    num_expected = len(config.nodes())

    if num_actual == num_expected:
      # Tag instances and create hosts file
      with open(config.hosts_path, 'w') as hosts_file:
        for (hostname, services) in config.nodes().items():
          instance = get_instance(nodes, instance_d[hostname])

          instance.add_tag(key='Name', value='{cn}-{id}'.format(cn=config.cluster_name, id=hostname))
          for tkey, tval in config.instance_tags().iteritems():
            instance.add_tag(key=tkey, value=tval)
          public_ip = ''
          if instance.ip_address:
            public_ip = instance.ip_address
          private_ip = instance.private_ip_address
          print >>hosts_file, hostname, private_ip, public_ip
      print "All {0} nodes have started.  Created hosts file at {1}".format(num_actual, config.hosts_path)
      break
    else:
      print "{0} of {1} nodes have started.  Waiting another 5 sec..".format(num_actual, num_expected)
Exemple #52
0
def launch_cluster(conn, opts, cluster_name):
    if opts.identity_file is None:
        print >> stderr, "ERROR: Must provide an identity file (-i) for ssh connections."
        sys.exit(1)
    if opts.key_pair is None:
        print >> stderr, "ERROR: Must provide a key pair name (-k) to use on instances."
        sys.exit(1)

    user_data_content = None
    if opts.user_data:
        with open(opts.user_data) as user_data_file:
            user_data_content = user_data_file.read()

    print "Setting up security groups..."
    master_group = get_or_make_group(conn, cluster_name + "-master")
    slave_group = get_or_make_group(conn, cluster_name + "-slaves")
    if master_group.rules == []:  # Group was just now created
        master_group.authorize(src_group=master_group)
        master_group.authorize(src_group=slave_group)
        master_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        master_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        master_group.authorize('tcp', 18080, 18080, '0.0.0.0/0')
        master_group.authorize('tcp', 19999, 19999, '0.0.0.0/0')
        master_group.authorize('tcp', 50030, 50030, '0.0.0.0/0')
        master_group.authorize('tcp', 50070, 50070, '0.0.0.0/0')
        master_group.authorize('tcp', 60070, 60070, '0.0.0.0/0')
        master_group.authorize('tcp', 4040, 4045, '0.0.0.0/0')
        if opts.ganglia:
            master_group.authorize('tcp', 5080, 5080, '0.0.0.0/0')
    if slave_group.rules == []:  # Group was just now created
        slave_group.authorize(src_group=master_group)
        slave_group.authorize(src_group=slave_group)
        slave_group.authorize('tcp', 22, 22, '0.0.0.0/0')
        slave_group.authorize('tcp', 8080, 8081, '0.0.0.0/0')
        slave_group.authorize('tcp', 50060, 50060, '0.0.0.0/0')
        slave_group.authorize('tcp', 50075, 50075, '0.0.0.0/0')
        slave_group.authorize('tcp', 60060, 60060, '0.0.0.0/0')
        slave_group.authorize('tcp', 60075, 60075, '0.0.0.0/0')

    # Check if instances are already running in our groups
    existing_masters, existing_slaves = get_existing_cluster(
        conn, opts, cluster_name, die_on_error=False)
    if existing_slaves or (existing_masters and not opts.use_existing_master):
        print >> stderr, ("ERROR: There are already instances running in " +
                          "group %s or %s" %
                          (master_group.name, slave_group.name))
        sys.exit(1)

    # Figure out Spark AMI
    if opts.ami is None:
        opts.ami = get_spark_ami(opts)
    print "Launching instances..."

    try:
        image = conn.get_all_images(image_ids=[opts.ami])[0]
    except:
        print >> stderr, "Could not find AMI " + opts.ami
        sys.exit(1)

    # Create block device mapping so that we can add an EBS volume if asked to
    block_map = BlockDeviceMapping()
    if opts.ebs_vol_size > 0:
        device = EBSBlockDeviceType()
        device.size = opts.ebs_vol_size
        device.delete_on_termination = True
        block_map["/dev/sdv"] = device

    # AWS ignores the AMI-specified block device mapping for M3 (see SPARK-3342).
    if opts.instance_type.startswith('m3.'):
        for i in range(get_num_disks(opts.instance_type)):
            dev = BlockDeviceType()
            dev.ephemeral_name = 'ephemeral%d' % i
            # The first ephemeral drive is /dev/sdb.
            name = '/dev/sd' + string.letters[i + 1]
            block_map[name] = dev

    # Launch slaves
    if opts.spot_price is not None:
        # Launch spot instances with the requested price
        print("Requesting %d slaves as spot instances with price $%.3f" %
              (opts.slaves, opts.spot_price))
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        my_req_ids = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            slave_reqs = conn.request_spot_instances(
                price=opts.spot_price,
                image_id=opts.ami,
                launch_group="launch-group-%s" % cluster_name,
                placement=zone,
                count=num_slaves_this_zone,
                key_name=opts.key_pair,
                security_groups=[slave_group],
                instance_type=opts.instance_type,
                block_device_map=block_map,
                user_data=user_data_content)
            my_req_ids += [req.id for req in slave_reqs]
            i += 1

        print "Waiting for spot instances to be granted..."
        try:
            while True:
                time.sleep(10)
                reqs = conn.get_all_spot_instance_requests()
                id_to_req = {}
                for r in reqs:
                    id_to_req[r.id] = r
                active_instance_ids = []
                for i in my_req_ids:
                    if i in id_to_req and id_to_req[i].state == "active":
                        active_instance_ids.append(id_to_req[i].instance_id)
                if len(active_instance_ids) == opts.slaves:
                    print "All %d slaves granted" % opts.slaves
                    reservations = conn.get_all_instances(active_instance_ids)
                    slave_nodes = []
                    for r in reservations:
                        slave_nodes += r.instances
                    break
                else:
                    print "%d of %d slaves granted, waiting longer" % (
                        len(active_instance_ids), opts.slaves)
        except:
            print "Canceling spot instance requests"
            conn.cancel_spot_instance_requests(my_req_ids)
            # Log a warning if any of these requests actually launched instances:
            (master_nodes,
             slave_nodes) = get_existing_cluster(conn,
                                                 opts,
                                                 cluster_name,
                                                 die_on_error=False)
            running = len(master_nodes) + len(slave_nodes)
            if running:
                print >> stderr, ("WARNING: %d instances are still running" %
                                  running)
            sys.exit(0)
    else:
        # Launch non-spot instances
        zones = get_zones(conn, opts)
        num_zones = len(zones)
        i = 0
        slave_nodes = []
        for zone in zones:
            num_slaves_this_zone = get_partition(opts.slaves, num_zones, i)
            if num_slaves_this_zone > 0:
                slave_res = image.run(key_name=opts.key_pair,
                                      security_groups=[slave_group],
                                      instance_type=opts.instance_type,
                                      placement=zone,
                                      min_count=num_slaves_this_zone,
                                      max_count=num_slaves_this_zone,
                                      block_device_map=block_map,
                                      user_data=user_data_content)
                slave_nodes += slave_res.instances
                print "Launched %d slaves in %s, regid = %s" % (
                    num_slaves_this_zone, zone, slave_res.id)
            i += 1

    # Launch or resume masters
    if existing_masters:
        print "Starting master..."
        for inst in existing_masters:
            if inst.state not in ["shutting-down", "terminated"]:
                inst.start()
        master_nodes = existing_masters
    else:
        master_type = opts.master_instance_type
        if master_type == "":
            master_type = opts.instance_type
        if opts.zone == 'all':
            opts.zone = random.choice(conn.get_all_zones()).name
        master_res = image.run(key_name=opts.key_pair,
                               security_groups=[master_group],
                               instance_type=master_type,
                               placement=opts.zone,
                               min_count=1,
                               max_count=1,
                               block_device_map=block_map,
                               user_data=user_data_content)
        master_nodes = master_res.instances
        print "Launched master in %s, regid = %s" % (zone, master_res.id)

    # Give the instances descriptive names
    for master in master_nodes:
        master.add_tag(key='Name',
                       value='{cn}-master-{iid}'.format(cn=cluster_name,
                                                        iid=master.id))
    for slave in slave_nodes:
        slave.add_tag(key='Name',
                      value='{cn}-slave-{iid}'.format(cn=cluster_name,
                                                      iid=slave.id))

    # Return all the instances
    return (master_nodes, slave_nodes)
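
Both launch paths above lean on two helpers, get_zones and get_partition, that are defined elsewhere in the script. As a rough sketch of the behaviour the loops above require (expand 'all' into every zone, then round-robin opts.slaves across the chosen zones), they could look like this:

def get_zones(conn, opts):
    # Expand 'all' into every availability zone in the region,
    # otherwise launch everything in the single requested zone.
    if opts.zone == 'all':
        return [z.name for z in conn.get_all_zones()]
    return [opts.zone]

def get_partition(total, num_partitions, current_partition):
    # Even split, with the remainder handed out to the first few partitions.
    num_this_partition = total / num_partitions
    if (total % num_partitions) - current_partition > 0:
        num_this_partition += 1
    return num_this_partition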
Exemple #53
0
    def handleImages(self, action, clc):
        if action == 'DescribeImages':
            owner = self.get_argument('Owner', None)
            if not owner:
                owners = None
            else:
                owners = [owner]
            return clc.get_all_images(owners)
        elif action == 'DescribeImageAttribute':
            imageid = self.get_argument('ImageId')
            attribute = self.get_argument('Attribute')
            return clc.get_image_attribute(imageid, attribute)
        elif action == 'ModifyImageAttribute':
            imageid = self.get_argument('ImageId')
            attribute = self.get_argument('Attribute')
            operation = self.get_argument('OperationType')
            users = self.get_argument_list('UserId')
            groups = self.get_argument_list('UserGroup')
            return clc.modify_image_attribute(imageid, attribute, operation,
                                              users, groups)
        elif action == 'ResetImageAttribute':
            imageid = self.get_argument('ImageId')
            attribute = self.get_argument('Attribute')
            return clc.reset_image_attribute(imageid, attribute)
        elif action == 'RegisterImage':
            image_location = self.get_argument('ImageLocation', None)
            name = self.get_argument('Name')
            description = self.get_argument('Description', None)
            if description:
                description = base64.b64decode(description)
            architecture = self.get_argument('Architecture', None)
            kernel_id = self.get_argument('KernelId', None)
            ramdisk_id = self.get_argument('RamdiskId', None)
            root_dev_name = self.get_argument('RootDeviceName', None)
            snapshot_id = self.get_argument('SnapshotId', None)
            # Collect the numbered BlockDeviceMapping.N.* parameters
            bdm = BlockDeviceMapping()
            mapping = self.get_argument('BlockDeviceMapping.1.DeviceName',
                                        None)
            idx = 1
            while mapping:
                pre = 'BlockDeviceMapping.%d' % idx
                dev_name = mapping
                block_dev_type = BlockDeviceType()
                block_dev_type.ephemeral_name = self.get_argument(
                    '%s.VirtualName' % pre, None)
                if not block_dev_type.ephemeral_name:
                    block_dev_type.no_device = \
                        (self.get_argument('%s.NoDevice' % pre, '') == 'true')
                    block_dev_type.snapshot_id = \
                        self.get_argument('%s.Ebs.SnapshotId' % pre, None)
                    block_dev_type.size = \
                        self.get_argument('%s.Ebs.VolumeSize' % pre, None)
                    block_dev_type.delete_on_termination = \
                        (self.get_argument('%s.Ebs.DeleteOnTermination' % pre,
                                           '') == 'true')
                bdm[dev_name] = block_dev_type
                idx += 1
                mapping = self.get_argument(
                    'BlockDeviceMapping.%d.DeviceName' % idx, None)
            if snapshot_id:
                rootbdm = BlockDeviceType()
                rootbdm.snapshot_id = snapshot_id
                bdm['/dev/sda1'] = rootbdm
            if len(bdm) == 0:
                bdm = None
            return clc.register_image(name, image_location, description,
                                      architecture, kernel_id, ramdisk_id,
                                      root_dev_name, bdm)
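
The RegisterImage branch above walks the numbered BlockDeviceMapping.N.* request parameters until one is missing. For reference, a hand-built boto mapping equivalent to what that loop would produce for one ephemeral device plus one snapshot-backed EBS device (the snapshot id and sizes below are placeholders) is:

from boto.ec2.blockdevicemapping import BlockDeviceMapping, BlockDeviceType

bdm = BlockDeviceMapping()

eph = BlockDeviceType()
eph.ephemeral_name = 'ephemeral0'     # BlockDeviceMapping.1.VirtualName
bdm['/dev/sdb'] = eph                 # BlockDeviceMapping.1.DeviceName

ebs = BlockDeviceType()
ebs.snapshot_id = 'snap-XXXXXXXX'     # placeholder snapshot id
ebs.size = 100                        # volume size in GB
ebs.delete_on_termination = True
bdm['/dev/sdf'] = ebs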
Exemple #54
0
import os
os.environ["BOTO_CONFIG"]='C:/Dropbox/AWS/.boto'

import boto
from boto.ec2.blockdevicemapping import BlockDeviceType, BlockDeviceMapping
#connecting to the AWS s3
#s3 = boto.connect_s3()
#bucket = s3.get_bucket('aws-publicdatasets')
#k = bucket.get_key("common-crawl/parse-output/valid_segments.txt")
#s = k.get_contents_as_string()
#list = s.split("\n")

#connecting to the AWS ec2
ec2 = boto.connect_ec2()
xvdb = BlockDeviceType()
xvdb.ephemeral_name = 'ephemeral0'
bdm = BlockDeviceMapping()
bdm['/dev/xvdb'] = xvdb
image_id = 'ami-fb8e9292'
image_name = 'Amazon Linux AMI 2014.03.1'
new_reservation = ec2.run_instances(image_id=image_id,
                                    key_name='ec2-key',
                                    security_groups=['web'],
                                    instance_type='m1.large',
                                    block_device_map=bdm)

instance = new_reservation.instances[0]

# Wait a minute or two while it boots
print "Spinning up instance for '%s' - %s. Waiting for it to boot up." % (image_id, image_name)
while instance.state != 'running':
    print ".",
    time.sleep(1)
    instance.update()
print " "
Exemple #55
0
def launch_cluster(conn, OPTS, cluster_name):
    print "Setting up security groups..."
    master_group = get_or_make_group(conn, cluster_name + "-master")
    slave_group = get_or_make_group(conn, cluster_name + "-slaves")
    ambari_group = get_or_make_group(conn, cluster_name + "-ambari")

    if master_group.rules == []:  # Group was just now created
        master_group.authorize(src_group=master_group)
        master_group.authorize(src_group=slave_group)
        master_group.authorize(src_group=ambari_group)
        # TODO: Currently Group is completely open
        master_group.authorize('tcp', 0, 65535, '0.0.0.0/0')
    if slave_group.rules == []:  # Group was just now created
        slave_group.authorize(src_group=master_group)
        slave_group.authorize(src_group=slave_group)
        slave_group.authorize(src_group=ambari_group)
        # TODO: Currently Group is completely open
        slave_group.authorize('tcp', 0, 65535, '0.0.0.0/0')
    if ambari_group.rules == []:  # Group was just now created
        ambari_group.authorize(src_group=master_group)
        ambari_group.authorize(src_group=slave_group)
        ambari_group.authorize(src_group=ambari_group)
        # TODO: Currently Group is completely open
        ambari_group.authorize('tcp', 0, 65535, '0.0.0.0/0')

    # Check if instances are already running in our groups
    if OPTS.resume:
        return get_existing_cluster(conn,
                                    OPTS,
                                    cluster_name,
                                    die_on_error=False)
    else:
        active_nodes = get_existing_cluster(conn,
                                            OPTS,
                                            cluster_name,
                                            die_on_error=False)
        if any(active_nodes):
            print >> stderr, (
                "ERROR: There are already instances running in " +
                "group %s or %s" % (master_group.name, slave_group.name))
            sys.exit(1)

        print "Launching instances..."

        try:
            image = conn.get_all_images(image_ids=[OPTS.ami])[0]
        except:
            print >> stderr, "Could not find AMI " + OPTS.ami
            sys.exit(1)

        # Create block device mapping that exposes the first ephemeral drive
        block_map = BlockDeviceMapping()
        device = BlockDeviceType()
        device.ephemeral_name = 'ephemeral0'
        device.delete_on_termination = True
        block_map["/dev/sdv"] = device

        # assume master and ambari hosts have the same instance type
        master_type = OPTS.master_instance_type
        if master_type == "":
            master_type = OPTS.instance_type

        # Launch slaves
        if OPTS.spot_price is not None:
            # Launch spot instances with the requested price
            num_spot_instances = OPTS.slaves + 2  # slaves, ambari host, master
            print("Requesting %d slaves as spot instances with price $%.3f" %
                  (num_spot_instances, OPTS.spot_price))
            zones = get_zones(conn, OPTS)
            num_zones = len(zones)
            i = 0
            my_req_ids = []
            ambari_req_ids = []
            master_req_ids = []
            for zone in zones:
                num_slaves_this_zone = get_partition(OPTS.slaves, num_zones, i)
                ambari_reqs = conn.request_spot_instances(
                    price=OPTS.spot_price,
                    image_id=OPTS.ami,
                    launch_group="launch-group-%s" % cluster_name,
                    placement=zone,
                    count=1,
                    key_name=OPTS.key_pair,
                    security_groups=[ambari_group],
                    instance_type=master_type,
                    block_device_map=block_map)
                master_reqs = conn.request_spot_instances(
                    price=OPTS.spot_price,
                    image_id=OPTS.ami,
                    launch_group="launch-group-%s" % cluster_name,
                    placement=zone,
                    count=1,
                    key_name=OPTS.key_pair,
                    security_groups=[master_group],
                    instance_type=master_type,
                    block_device_map=block_map)
                slave_reqs = conn.request_spot_instances(
                    price=OPTS.spot_price,
                    image_id=OPTS.ami,
                    launch_group="launch-group-%s" % cluster_name,
                    placement=zone,
                    count=num_slaves_this_zone,
                    key_name=OPTS.key_pair,
                    security_groups=[slave_group],
                    instance_type=OPTS.instance_type,
                    block_device_map=block_map)
                my_req_ids += [req.id for req in slave_reqs]
                ambari_req_ids += [req.id for req in ambari_reqs]
                master_req_ids += [req.id for req in master_reqs]
                i += 1

            print "Waiting for spot instances to be granted..."
            try:
                while True:
                    time.sleep(10)
                    reqs = conn.get_all_spot_instance_requests()
                    id_to_req = {}
                    for r in reqs:
                        id_to_req[r.id] = r
                    active_instance_ids = []
                    ambari_instance_ids = []
                    master_instance_ids = []
                    for i in my_req_ids:
                        if i in id_to_req and id_to_req[i].state == "active":
                            active_instance_ids.append(
                                id_to_req[i].instance_id)
                    for i in master_req_ids:
                        if i in id_to_req and id_to_req[i].state == "active":
                            master_instance_ids.append(
                                id_to_req[i].instance_id)
                    for i in ambari_req_ids:
                        if i in id_to_req and id_to_req[i].state == "active":
                            ambari_instance_ids.append(
                                id_to_req[i].instance_id)
                    if len(active_instance_ids) == OPTS.slaves and len(
                            master_instance_ids) == 1 and len(
                                ambari_instance_ids) == 1:
                        print "All %d slaves, 1 master, 1 ambari host granted" % OPTS.slaves
                        slave_nodes = []
                        master_nodes = []
                        ambari_nodes = []
                        for r in conn.get_all_instances(active_instance_ids):
                            slave_nodes += r.instances
                        for r in conn.get_all_instances(master_instance_ids):
                            master_nodes += r.instances
                        for r in conn.get_all_instances(ambari_instance_ids):
                            ambari_nodes += r.instances
                        break
                    else:
                        print "%d of %d spot instance requests granted, waiting longer" % (
                            len(active_instance_ids), num_spot_instances)
            except Exception as e:
                print e
                print "Canceling spot instance requests"
                conn.cancel_spot_instance_requests(my_req_ids)
                # Log a warning if any of these requests actually launched instances:
                (master_nodes, slave_nodes,
                 ambari_nodes) = get_existing_cluster(conn,
                                                      OPTS,
                                                      cluster_name,
                                                      die_on_error=False)
                running = len(master_nodes) + len(slave_nodes) + len(
                    ambari_nodes)
                if running:
                    print >> stderr, (
                        "WARNING: %d instances are still running" % running)
                sys.exit(0)
        else:
            # Launch non-spot instances
            zones = get_zones(conn, OPTS)
            num_zones = len(zones)
            i = 0
            slave_nodes = []
            for zone in zones:
                num_slaves_this_zone = get_partition(OPTS.slaves, num_zones, i)
                if num_slaves_this_zone > 0:
                    slave_res = image.run(key_name=OPTS.key_pair,
                                          security_groups=[slave_group],
                                          instance_type=OPTS.instance_type,
                                          placement=zone,
                                          min_count=num_slaves_this_zone,
                                          max_count=num_slaves_this_zone,
                                          block_device_map=block_map)
                    slave_nodes += slave_res.instances
                    print "Launched %d slaves in %s, regid = %s" % (
                        num_slaves_this_zone, zone, slave_res.id)
                i += 1

            # Launch masters
            if OPTS.zone == 'all':
                OPTS.zone = random.choice(conn.get_all_zones()).name
            master_res = image.run(key_name=OPTS.key_pair,
                                   security_groups=[master_group],
                                   instance_type=master_type,
                                   placement=OPTS.zone,
                                   min_count=1,
                                   max_count=1,
                                   block_device_map=block_map)
            master_nodes = master_res.instances
            print "Launched master in %s, regid = %s" % (zone, master_res.id)

            ambari_type = master_type
            if OPTS.zone == 'all':
                OPTS.zone = random.choice(conn.get_all_zones()).name
            ambari_res = image.run(key_name=OPTS.key_pair,
                                   security_groups=[ambari_group],
                                   instance_type=ambari_type,
                                   placement=OPTS.zone,
                                   min_count=1,
                                   max_count=1,
                                   block_device_map=block_map)
            ambari_nodes = ambari_res.instances
            print "Launched ambari in %s, regid = %s" % (zone, ambari_res.id)

        # Return all the instances
        return (master_nodes, slave_nodes, ambari_nodes)
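
Note that, unlike the first launch_cluster above, this variant returns the ambari nodes but never tags any of the instances. A sketch of extending the Name-tagging pattern from the earlier snippet to all three groups (the tag format here is chosen for illustration) would be:

# Sketch: give every node a descriptive Name tag, mirroring the earlier snippet.
for master in master_nodes:
    master.add_tag(key='Name', value='%s-master-%s' % (cluster_name, master.id))
for slave in slave_nodes:
    slave.add_tag(key='Name', value='%s-slave-%s' % (cluster_name, slave.id))
for ambari in ambari_nodes:
    ambari.add_tag(key='Name', value='%s-ambari-%s' % (cluster_name, ambari.id))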
Exemple #56
0
def start_instances(conn, imageId, instanceType, num_instances, master=False):
    cur_instances = set()
    if os.path.isfile('musketeer_ec2_workers'):
        for line in open('musketeer_ec2_workers').readlines():
            fields = [x.strip() for x in line.split()]
            cur_instances.add(fields[0])
    if os.path.isfile('musketeer_ec2_master'):
        for line in open('musketeer_ec2_master').readlines():
            fields = [x.strip() for x in line.split()]
            cur_instances.add(fields[0])
    reservations = conn.get_all_reservations()
    for reservation in reservations:
        for instance in reservation.instances:
            instance.update()
            if instance.state == u'running':
                cur_instances.add(instance.id)
    if master:
        instances_file = open("musketeer_ec2_master", "w")
    else:
        instances_file = open("musketeer_ec2_workers", "a")
    instances = []
    bdm = BlockDeviceMapping()
    eph0 = BlockDeviceType()
    eph0.ephemeral_name = 'ephemeral0'
    bdm['/dev/sdb'] = eph0
    spot_request = conn.request_spot_instances(
        price="0.8",
        image_id=imageId,
        count=num_instances,
        launch_group="us-east-1a",
        availability_zone_group="us-east-1a",
        key_name="Musketeer",
        security_groups=["Musketeer"],
        instance_type=instanceType,
        block_device_map=bdm,
        user_data="")
    spot_request = conn.get_all_spot_instance_requests(
        request_ids=[spot_request[0].id])[0]

    # Wait until requests gets fulfilled
    open_request = True
    while open_request:
        time.sleep(5)
        open_request = False
        spot_requests = conn.get_all_spot_instance_requests(
            request_ids=[spot_request.id])
        for spot_request in spot_requests:
            print 'Spot request status: ', spot_request.state
            if spot_request.state == 'open':
                open_request = True

    # Get info about instances
    instance_num = 0
    reservations = conn.get_all_reservations()
    for reservation in reservations:
        for instance in reservation.instances:
            instance.update()
            while instance.state == u'pending':
                time.sleep(1)
                instance.update()
            if instance.state == u'running':
                if instance.id not in cur_instances:
                    instance_num += 1
                    print 'Started instance %d: %s' % (instance_num, instance)
                    instances.append(instance.public_dns_name)
                    instances_file.write(instance.id + ' ' +
                                         instance.public_dns_name + '\n')
            else:
                print 'Could not start instance: ', instance
    instances_file.close()
    return instances
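
start_instances appends one "<instance-id> <public-dns>" line per worker to musketeer_ec2_workers (and writes the master to musketeer_ec2_master). A small companion sketch for reading those records back, e.g. before fanning out SSH commands:

def read_ec2_hosts(path='musketeer_ec2_workers'):
    # Parse the "<instance-id> <public-dns>" lines written by start_instances.
    hosts = []
    with open(path) as hosts_file:
        for line in hosts_file:
            fields = line.split()
            if len(fields) >= 2:
                hosts.append((fields[0], fields[1]))
    return hosts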
Exemple #57
0
def rebundle(reboot_if_needed=False):
    """
    Rebundles the EC2 instance that is passed as the -H parameter
    This script handles all aspects of the rebundling process and is (almost) fully automated.
    Two things should be edited and provided before invoking it: AWS account information 
    and the desired size of the root volume for the new instance.  
     
    :rtype: bool
    :return: If instance was successfully rebundled and an AMI ID was received,
             return True.
             False, otherwise.
    """
    _check_fabric_version()
    time_start = dt.datetime.utcnow()
    print "Rebundling instance '%s'. Start time: %s" % (env.hosts[0], time_start)
    _amazon_ec2_environment()
    if boto:
        # Select appropriate region:
        availability_zone = run("curl --silent http://169.254.169.254/latest/meta-data/placement/availability-zone")
        instance_region = availability_zone[:-1] # Truncate zone letter to get region name
        ec2_conn = _get_ec2_conn(instance_region)
        
        # hostname = env.hosts[0] # -H flag to fab command sets this variable so get only 1st hostname
        instance_id = run("curl --silent http://169.254.169.254/latest/meta-data/instance-id")
        
        # Get the size (in GB) of the root partition for the new image
        vol_size = _get_root_vol_size(ec2_conn, instance_id)
        
        # Handle reboot if required
        if not _reboot(instance_id, reboot_if_needed):
            return False # Indicates that rebundling was not completed and should be restarted
        
        _clean() # Clean up the environment before rebundling
        image_id = None
        kernel_id = run("curl --silent http://169.254.169.254/latest/meta-data/kernel-id")
        if ec2_conn and instance_id and availability_zone and kernel_id:
            print "Rebundling instance with ID '%s' in region '%s'" % (instance_id, ec2_conn.region.name)
            try:
                # Need 2 volumes - one for image (rsync) and the other for the snapshot (see instance-to-ebs-ami.sh)
                vol = ec2_conn.create_volume(vol_size, availability_zone)
                vol2 = ec2_conn.create_volume(vol_size, availability_zone)
                # TODO: wait until it becomes 'available'
                print "Created 2 new volumes of size '%s' with IDs '%s' and '%s'" % (vol_size, vol.id, vol2.id)
            except EC2ResponseError, e:
                print(red("Error creating volume: %s" % e))
                return False
            
            if vol:
                try:
                    # Attach newly created volumes to the instance
                    dev_id = '/dev/sdh'
                    if not _attach(ec2_conn, instance_id, vol.id, dev_id):
                        print(red("Error attaching volume '%s' to the instance. Aborting." % vol.id))
                        return False
                    dev_id = '/dev/sdj'
                    if not _attach(ec2_conn, instance_id, vol2.id, dev_id):
                        print(red("Error attaching volume '%s' to the instance. Aborting." % vol2.id))
                        return False
                    # Move the file system onto the new volume (with a help of a script)
                    url = os.path.join(REPO_ROOT_URL, "instance-to-ebs-ami.sh")
                    # with contextlib.nested(cd('/tmp'), settings(hide('stdout', 'stderr'))):
                    with cd('/tmp'):
                        if exists('/tmp/'+os.path.split(url)[1]):
                            sudo('rm /tmp/'+os.path.split(url)[1])
                        sudo('wget %s' % url)
                        sudo('chmod u+x /tmp/%s' % os.path.split(url)[1])
                        sudo('./%s' % os.path.split(url)[1])
                    # Detach the new volume
                    _detach(ec2_conn, instance_id, vol.id)
                    _detach(ec2_conn, instance_id, vol2.id)
                    answer = confirm("Would you like to terminate the instance used during rebundling?", default=False)
                    if answer:
                        ec2_conn.terminate_instances([instance_id])
                    # Create a snapshot of the new volume
                    commit_num = local('cd %s; hg tip | grep changeset | cut -d: -f2' % os.getcwd()).strip()
                    snap_id = _create_snapshot(ec2_conn, vol.id, "AMI: galaxy-cloudman (using mi-deployment at commit %s)" % commit_num)
                    # Register the snapshot of the new volume as a machine image (i.e., AMI)
                    arch = 'x86_64'
                    root_device_name = '/dev/sda1'
                    # Extra info on how EBS image registration is done: http://markmail.org/message/ofgkyecjktdhofgz
                    # http://www.elastician.com/2009/12/creating-ebs-backed-ami-from-s3-backed.html
                    # http://www.shlomoswidler.com/2010/01/creating-consistent-snapshots-of-live.html
                    ebs = BlockDeviceType()
                    ebs.snapshot_id = snap_id
                    ebs.delete_on_termination = True
                    ephemeral0_device_name = '/dev/sdb'
                    ephemeral0 = BlockDeviceType()
                    ephemeral0.ephemeral_name = 'ephemeral0'
                    ephemeral1_device_name = '/dev/sdc'
                    ephemeral1 = BlockDeviceType()
                    ephemeral1.ephemeral_name = 'ephemeral1'
                    # ephemeral2_device_name = '/dev/sdd' # Needed for instances w/ 3 ephemeral disks
                    # ephemeral2 = BlockDeviceType()
                    # ephemeral2.ephemeral_name = 'ephemeral2'
                    # ephemeral3_device_name = '/dev/sde' # Needed for instances w/ 4 ephemeral disks
                    # ephemeral3 = BlockDeviceType()
                    # ephemeral3.ephemeral_name = 'ephemeral3'
                    block_map = BlockDeviceMapping()
                    block_map[root_device_name] = ebs
                    block_map[ephemeral0_device_name] = ephemeral0
                    block_map[ephemeral1_device_name] = ephemeral1
                    print(yellow('galaxy-cloudman-%s' % time_start.strftime("%Y-%m-%d")))
                    name, desc = _get_image_name()
                    image_id = ec2_conn.register_image(name, description=desc, architecture=arch,
                        kernel_id=kernel_id, root_device_name=root_device_name, block_device_map=block_map)
                    answer = confirm("Volume with ID '%s' was created and used to make this AMI but is not longer needed. Would you like to delete it?" % vol.id)
                    if answer:
                        ec2_conn.delete_volume(vol.id)
                    print "Deleting the volume (%s) used for rsync only" % vol2.id
                    ec2_conn.delete_volume(vol2.id)
                    print(green("--------------------------"))
                    print(green("Finished creating new machine image. Image ID: '%s'" % (image_id)))
                    print(green("--------------------------"))
                    answer = confirm("Would you like to make this machine image public?", default=False)
                    if image_id and answer:
                        ec2_conn.modify_image_attribute(image_id, attribute='launchPermission', operation='add', groups=['all'])
                except EC2ResponseError, e:
                    print(red("Error creating image: %s" % e))
                    return False
            else:
                print(red("Error creating new volume"))
                return False
def AwsStartFromConfigSpec(args, configSpec):

    if configSpec['Verbose']:
        log.info("[AwsStartInstanceImpl]")

    k, s = AwsLoadCreds(configSpec)
    conn = AwsConnect(configSpec, k, s)

    reservation = None
    subnetId = None
    securityGroups = None
    securityGroupIds = None
    shutdownBehavior = None

    #NOTE: shutdown behavior can only be defined for EBS backed instances
    if 'EbsInstance' in configSpec:
        if configSpec['EbsInstance']:
            shutdownBehavior = 'terminate'
    else:
        shutdownBehavior = 'stop'

    if configSpec['InVpc']:
        securityGroupIds = configSpec['SecurityGroups']
    else:
        securityGroups = configSpec['SecurityGroups']

    deviceMap = None
    ebsOptimized = False
    if 'EBSRaid' in configSpec and configSpec["EBSRaid"]:
        ebsOptimized = True
        deviceMap = BlockDeviceMapping()
        if configSpec['InstanceType'] == 'm3.2xlarge':
            ep0 = BlockDeviceType()
            ep0.ephemeral_name = 'ephemeral0'
            deviceMap['/dev/xvdb'] = ep0
            ep1 = BlockDeviceType()
            ep1.ephemeral_name = 'ephemeral1'
            deviceMap['/dev/xvdc'] = ep1

        if configSpec['InstanceType'] == 'c3.4xlarge':
            ep0 = BlockDeviceType()
            ep0.ephemeral_name = 'ephemeral0'
            deviceMap['/dev/xvdb'] = ep0
            ep1 = BlockDeviceType()
            ep1.ephemeral_name = 'ephemeral1'
            deviceMap['/dev/xvdc'] = ep1

    if 'EphemeralRaid' in configSpec and configSpec['EphemeralRaid']:
        if configSpec['Verbose']:
            log.info(
                "AwsStartFromConfigSpec(): configSpec['EphemeralRaid']=%s (%s)"
                % (configSpec['EphemeralRaid'], configSpec['InstanceType']))
        deviceMap = BlockDeviceMapping()
        if configSpec['InstanceType'] == 'm1.xlarge':
            ep0 = BlockDeviceType()
            ep0.ephemeral_name = 'ephemeral0'
            deviceMap['/dev/sdb'] = ep0
            ep1 = BlockDeviceType()
            ep1.ephemeral_name = 'ephemeral1'
            deviceMap['/dev/sdc'] = ep1
            ep2 = BlockDeviceType()
            ep2.ephemeral_name = 'ephemeral2'
            deviceMap['/dev/sdd'] = ep2
            ep3 = BlockDeviceType()
            ep3.ephemeral_name = 'ephemeral3'
            deviceMap['/dev/sde'] = ep3
        elif configSpec['InstanceType'] == 'm1.large':
            ep0 = BlockDeviceType()
            ep0.ephemeral_name = 'ephemeral0'
            deviceMap['/dev/sdb'] = ep0
            ep1 = BlockDeviceType()
            ep1.ephemeral_name = 'ephemeral1'
            deviceMap['/dev/sdc'] = ep1
        elif configSpec['InstanceType'] == 'c3.8xlarge':
            ep0 = BlockDeviceType()
            ep0.ephemeral_name = 'ephemeral0'
            deviceMap['/dev/sdb'] = ep0
            ep1 = BlockDeviceType()
            ep1.ephemeral_name = 'ephemeral1'
            deviceMap['/dev/sdc'] = ep1
        elif configSpec['InstanceType'] == 'i2.2xlarge':
            ep0 = BlockDeviceType()
            ep0.ephemeral_name = 'ephemeral0'
            deviceMap['/dev/xvdb'] = ep0
            ep1 = BlockDeviceType()
            ep1.ephemeral_name = 'ephemeral1'
            deviceMap['/dev/xvdc'] = ep1
        else:
            log.error(
                "AwsStartFromConfigSpec(): unexpected instance type => %s" %
                configSpec['InstanceType'])
            sys.exit(-1)

    ami_image = configSpec['AmiId']

    aws_az = args.az

    subnet = configSpec['Subnets'][aws_az]['subnet']

    try:
        log.info("Launching in AZ: {0}".format(aws_az))
        if args.dryrun:
            log.warn("DRY RUN, NOT LAUNCHING....")
            sys.exit()
        else:
            reservation = conn.run_instances(
                ami_image,
                placement=aws_az,
                min_count=settings['total-nodes'],
                max_count=settings['total-nodes'],
                instance_initiated_shutdown_behavior=shutdownBehavior,
                instance_type=configSpec['InstanceType'],
                key_name=configSpec['SshKeys']['KeyPairName'],
                subnet_id=subnet,
                security_groups=securityGroups,
                security_group_ids=securityGroupIds,
                ebs_optimized=ebsOptimized,
                block_device_map=deviceMap)

    except boto.exception.EC2ResponseError as x:
        log.error("Failed to start an AWS instance: %s" % x)
        return

    except Exception as e:
        log.error("Got reservation error", e)
        return

    if reservation:
        log.info('Waiting for VM instances to start...')

    time.sleep(15)

    instanceSetInfo = []
    # List of all the active instance ids we can use to attach EBS volumes to
    instanceIds = []
    instanceIps = []
    isFirst = True
    firstNodeIp = None
    for i, instance in enumerate(reservation.instances):
        status = instance.update()
        while status != 'running':
            log.info("Instance status: %s" % status)
            if status == 'terminated':
                sys.exit(-1)

            time.sleep(4)
            status = instance.update()

        if configSpec['Verbose']:
            log.info("Instance ID: %s" % instance.id)
            log.info("Instance Private IP: %s" % instance.private_ip_address)
            log.info("Instance Public DNS: %s" % instance.public_dns_name)

        if isFirst:
            firstNodeIp = instance.private_ip_address

        info = {
            'Id': instance.id,
            'PrivateIp': instance.private_ip_address,
            'PublicDnsName': instance.public_dns_name,
            'FirstNode': isFirst
        }
        isFirst = False
        instanceSetInfo.append(info)
        instanceIds.append(instance.id)
        instanceIps.append(instance.private_ip_address)

    tags = configSpec['Tags']

    for instance in reservation.instances:
        conn.create_tags([instance.id], tags)

    ips = ",".join(instanceIps)
    print "IPS ARE"
    print "-" * 10
    print ips
    print "-" * 10
    print
    for ip in instanceIps:
        print ip
    print
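
AwsStartFromConfigSpec pulls everything it needs out of configSpec (plus a module-level settings dict for the node count). The exact schema belongs to the caller, but a hypothetical spec exercising the ephemeral-RAID path on m1.xlarge, with every identifier below a placeholder, would look roughly like:

# Hypothetical configSpec; every id/name below is a placeholder.
configSpec = {
    'Verbose': True,
    'AmiId': 'ami-XXXXXXXX',          # placeholder AMI id
    'InstanceType': 'm1.xlarge',
    'EphemeralRaid': True,            # builds the 4-disk ephemeral map above
    'EbsInstance': False,
    'InVpc': True,                    # security groups passed as ids
    'SecurityGroups': ['sg-XXXXXXXX'],
    'Subnets': {'us-east-1a': {'subnet': 'subnet-XXXXXXXX'}},
    'SshKeys': {'KeyPairName': 'my-keypair'},
    'Tags': {'Name': 'example-node'},
}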